net: aquantia: implement data PTP datapath
Here we do alloc/free IRQs for PTP rings.
We also implement processing of PTP packets on TX and RX sides.

Signed-off-by: Egor Pomozov <epomozov@marvell.com>
Co-developed-by: Sergey Samoilenko <sergey.samoilenko@aquantia.com>
Signed-off-by: Sergey Samoilenko <sergey.samoilenko@aquantia.com>
Co-developed-by: Dmitry Bezrukov <dmitry.bezrukov@aquantia.com>
Signed-off-by: Dmitry Bezrukov <dmitry.bezrukov@aquantia.com>
Signed-off-by: Igor Russkikh <igor.russkikh@aquantia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
61cc502ef4
commit
04a1839950
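
For orientation before the diff: the TX-side change below steers PTP traffic onto a dedicated ring. In aq_ndev_start_xmit(), a packet is handed to aq_ptp_xmit() when hardware timestamping is requested for the skb (SKBTX_HW_TSTAMP), when it is IPv4/UDP with destination port 319 or 320 (PTP event/general messages), or when its EtherType is ETH_P_1588. The following is a minimal userspace sketch of that classification rule only, not driver code from the patch; it assumes an untagged Ethernet frame and ignores the skb timestamp flag, which has no raw-frame equivalent.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

#define PTP_ETHERTYPE  0x88F7   /* same value as ETH_P_1588 */
#define PTP_EV_PORT    319      /* PTP event messages */
#define PTP_GEN_PORT   320      /* PTP general messages */

/* Classify a raw, untagged Ethernet frame the way the patch classifies skbs:
 * L2 PTP by EtherType, or PTPv2 over IPv4/UDP by destination port.
 */
static bool frame_is_ptp(const uint8_t *frame, size_t len)
{
	uint16_t ethertype, dport;
	size_t ihl;

	if (len < 14)
		return false;

	memcpy(&ethertype, frame + 12, sizeof(ethertype));
	ethertype = ntohs(ethertype);

	if (ethertype == PTP_ETHERTYPE)
		return true;

	if (ethertype != 0x0800 || len < 14 + 20 + 8)
		return false;                    /* not IPv4 or too short */

	if ((frame[14] >> 4) != 4 || frame[23] != 17)
		return false;                    /* not IPv4 or not UDP */

	ihl = (size_t)(frame[14] & 0x0f) * 4;    /* IPv4 header length */
	if (len < 14 + ihl + 8)
		return false;

	memcpy(&dport, frame + 14 + ihl + 2, sizeof(dport));
	dport = ntohs(dport);

	return dport == PTP_EV_PORT || dport == PTP_GEN_PORT;
}

Port 320 (general messages) is included deliberately: as the in-code comment in the patch notes, the current firmware/hardware PTP design requires even general messages to go through the PTP queue, otherwise the PTP stream fails to sync.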
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
 * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
 */

/* File aq_cfg.h: Definition of configuration parameters and constants. */

@@ -27,7 +27,7 @@

#define AQ_CFG_INTERRUPT_MODERATION_USEC_MAX (0x1FF * 2)

#define AQ_CFG_IRQ_MASK 0x1FFU
#define AQ_CFG_IRQ_MASK 0x3FFU

#define AQ_CFG_VECS_MAX 8U
#define AQ_CFG_TCS_MAX 8U

@@ -244,6 +244,12 @@ struct aq_hw_ops {
	int (*hw_rx_tc_mode_get)(struct aq_hw_s *self, u32 *tc_mode);

	int (*hw_ring_hwts_rx_fill)(struct aq_hw_s *self,
				    struct aq_ring_s *aq_ring);

	int (*hw_ring_hwts_rx_receive)(struct aq_hw_s *self,
				       struct aq_ring_s *ring);

	void (*hw_get_ptp_ts)(struct aq_hw_s *self, u64 *stamp);

	int (*hw_adj_clock_freq)(struct aq_hw_s *self, s32 delta);

@@ -252,6 +258,12 @@ struct aq_hw_ops {
	int (*hw_set_sys_clock)(struct aq_hw_s *self, u64 time, u64 ts);

	u16 (*rx_extract_ts)(struct aq_hw_s *self, u8 *p, unsigned int len,
			     u64 *timestamp);

	int (*extract_hwts)(struct aq_hw_s *self, u8 *p, unsigned int len,
			    u64 *timestamp);

	int (*hw_set_fc)(struct aq_hw_s *self, u32 fc, u32 tc);
};

@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
 * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
 */

/* File aq_main.c: Main file for aQuantia Linux driver. */

@@ -10,10 +10,13 @@
#include "aq_nic.h"
#include "aq_pci_func.h"
#include "aq_ethtool.h"
#include "aq_ptp.h"
#include "aq_filters.h"

#include <linux/netdevice.h>
#include <linux/module.h>
#include <linux/ip.h>
#include <linux/udp.h>

MODULE_LICENSE("GPL v2");
MODULE_VERSION(AQ_CFG_DRV_VERSION);

@@ -93,6 +96,24 @@ static int aq_ndev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct aq_nic_s *aq_nic = netdev_priv(ndev);

	if (unlikely(aq_utils_obj_test(&aq_nic->flags, AQ_NIC_PTP_DPATH_UP))) {
		/* Hardware adds the Timestamp for PTPv2 802.AS1
		 * and PTPv2 IPv4 UDP.
		 * We have to push even general 320 port messages to the ptp
		 * queue explicitly. This is a limitation of current firmware
		 * and hardware PTP design of the chip. Otherwise ptp stream
		 * will fail to sync
		 */
		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ||
		    unlikely((ip_hdr(skb)->version == 4) &&
			     (ip_hdr(skb)->protocol == IPPROTO_UDP) &&
			     ((udp_hdr(skb)->dest == htons(319)) ||
			      (udp_hdr(skb)->dest == htons(320)))) ||
		    unlikely(eth_hdr(skb)->h_proto == htons(ETH_P_1588)))
			return aq_ptp_xmit(aq_nic, skb);
	}

	skb_tx_timestamp(skb);
	return aq_nic_xmit(aq_nic, skb);
}

@@ -146,8 +146,11 @@ static int aq_nic_update_link_status(struct aq_nic_s *self)
			self->aq_hw->aq_link_status.mbps);
		aq_nic_update_interrupt_moderation_settings(self);

		if (self->aq_ptp)
		if (self->aq_ptp) {
			aq_ptp_clock_init(self);
			aq_ptp_tm_offset_set(self,
					     self->aq_hw->aq_link_status.mbps);
		}

		/* Driver has to update flow control settings on RX block
		 * on any link event.

@@ -196,6 +199,8 @@ static void aq_nic_service_task(struct work_struct *work)
					    service_task);
	int err;

	aq_ptp_service_task(self);

	if (aq_utils_obj_test(&self->flags, AQ_NIC_FLAGS_IS_NOT_READY))
		return;

@@ -408,6 +413,10 @@ int aq_nic_start(struct aq_nic_s *self)
			goto err_exit;
	}

	err = aq_ptp_irq_alloc(self);
	if (err < 0)
		goto err_exit;

	if (self->aq_nic_cfg.link_irq_vec) {
		int irqvec = pci_irq_vector(self->pdev,
					    self->aq_nic_cfg.link_irq_vec);

@@ -440,9 +449,8 @@ int aq_nic_start(struct aq_nic_s *self)
	return err;
}

static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
				   struct sk_buff *skb,
				   struct aq_ring_s *ring)
unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
			    struct aq_ring_s *ring)
{
	unsigned int ret = 0U;
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;

@@ -973,6 +981,8 @@ int aq_nic_stop(struct aq_nic_s *self)
	else
		aq_pci_func_free_irqs(self);

	aq_ptp_irq_free(self);

	for (i = 0U, aq_vec = self->aq_vec[0];
	     self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
		aq_vec_stop(aq_vec);

@@ -54,6 +54,7 @@ struct aq_nic_cfg_s {
#define AQ_NIC_FLAG_STOPPING 0x00000008U
#define AQ_NIC_FLAG_RESETTING 0x00000010U
#define AQ_NIC_FLAG_CLOSING 0x00000020U
#define AQ_NIC_PTP_DPATH_UP 0x02000000U
#define AQ_NIC_LINK_DOWN 0x04000000U
#define AQ_NIC_FLAG_ERR_UNPLUG 0x40000000U
#define AQ_NIC_FLAG_ERR_HW 0x80000000U

@@ -129,6 +130,8 @@ void aq_nic_cfg_start(struct aq_nic_s *self);
int aq_nic_ndev_register(struct aq_nic_s *self);
void aq_nic_ndev_free(struct aq_nic_s *self);
int aq_nic_start(struct aq_nic_s *self);
unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
			    struct aq_ring_s *ring);
int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb);
int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p);
int aq_nic_get_regs_count(struct aq_nic_s *self);

@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
 * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
 */

/* File aq_pci_func.c: Definition of PCI functions. */

@@ -269,6 +269,9 @@ static int aq_pci_probe(struct pci_dev *pdev,
	numvecs = min((u8)AQ_CFG_VECS_DEF,
		      aq_nic_get_cfg(self)->aq_hw_caps->msix_irqs);
	numvecs = min(numvecs, num_online_cpus());
	/* Request IRQ vector for PTP */
	numvecs += 1;

	numvecs += AQ_HW_SERVICE_IRQS;
	/*enable interrupts */
#if !AQ_CFG_FORCE_LEGACY_INT

@@ -8,12 +8,24 @@
 */

#include <linux/ptp_clock_kernel.h>
#include <linux/interrupt.h>
#include <linux/clocksource.h>

#include "aq_nic.h"
#include "aq_ptp.h"
#include "aq_ring.h"

#define AQ_PTP_TX_TIMEOUT (HZ * 10)

enum ptp_speed_offsets {
	ptp_offset_idx_10 = 0,
	ptp_offset_idx_100,
	ptp_offset_idx_1000,
	ptp_offset_idx_2500,
	ptp_offset_idx_5000,
	ptp_offset_idx_10000,
};

struct ptp_skb_ring {
	struct sk_buff **buff;
	spinlock_t lock;

@@ -22,6 +34,12 @@ struct ptp_skb_ring {
	unsigned int tail;
};

struct ptp_tx_timeout {
	spinlock_t lock;
	bool active;
	unsigned long tx_start;
};

struct aq_ptp_s {
	struct aq_nic_s *aq_nic;
	spinlock_t ptp_lock;

@@ -29,8 +47,16 @@ struct aq_ptp_s {
	struct ptp_clock *ptp_clock;
	struct ptp_clock_info ptp_info;

	atomic_t offset_egress;
	atomic_t offset_ingress;

	struct aq_ring_param_s ptp_ring_param;

	struct ptp_tx_timeout ptp_tx_timeout;

	unsigned int idx_vector;
	struct napi_struct napi;

	struct aq_ring_s ptp_tx;
	struct aq_ring_s ptp_rx;
	struct aq_ring_s hwts_rx;

@@ -38,6 +64,101 @@ struct aq_ptp_s {
	struct ptp_skb_ring skb_ring;
};

struct ptp_tm_offset {
	unsigned int mbps;
	int egress;
	int ingress;
};

static struct ptp_tm_offset ptp_offset[6];

void aq_ptp_tm_offset_set(struct aq_nic_s *aq_nic, unsigned int mbps)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
	int i, egress, ingress;

	if (!aq_ptp)
		return;

	egress = 0;
	ingress = 0;

	for (i = 0; i < ARRAY_SIZE(ptp_offset); i++) {
		if (mbps == ptp_offset[i].mbps) {
			egress = ptp_offset[i].egress;
			ingress = ptp_offset[i].ingress;
			break;
		}
	}

	atomic_set(&aq_ptp->offset_egress, egress);
	atomic_set(&aq_ptp->offset_ingress, ingress);
}

static int __aq_ptp_skb_put(struct ptp_skb_ring *ring, struct sk_buff *skb)
{
	unsigned int next_head = (ring->head + 1) % ring->size;

	if (next_head == ring->tail)
		return -ENOMEM;

	ring->buff[ring->head] = skb_get(skb);
	ring->head = next_head;

	return 0;
}

static int aq_ptp_skb_put(struct ptp_skb_ring *ring, struct sk_buff *skb)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ring->lock, flags);
	ret = __aq_ptp_skb_put(ring, skb);
	spin_unlock_irqrestore(&ring->lock, flags);

	return ret;
}

static struct sk_buff *__aq_ptp_skb_get(struct ptp_skb_ring *ring)
{
	struct sk_buff *skb;

	if (ring->tail == ring->head)
		return NULL;

	skb = ring->buff[ring->tail];
	ring->tail = (ring->tail + 1) % ring->size;

	return skb;
}

static struct sk_buff *aq_ptp_skb_get(struct ptp_skb_ring *ring)
{
	unsigned long flags;
	struct sk_buff *skb;

	spin_lock_irqsave(&ring->lock, flags);
	skb = __aq_ptp_skb_get(ring);
	spin_unlock_irqrestore(&ring->lock, flags);

	return skb;
}

static unsigned int aq_ptp_skb_buf_len(struct ptp_skb_ring *ring)
{
	unsigned long flags;
	unsigned int len;

	spin_lock_irqsave(&ring->lock, flags);
	len = (ring->head >= ring->tail) ?
	      ring->head - ring->tail :
	      ring->size - ring->tail + ring->head;
	spin_unlock_irqrestore(&ring->lock, flags);

	return len;
}

static int aq_ptp_skb_ring_init(struct ptp_skb_ring *ring, unsigned int size)
{
	struct sk_buff **buff = kmalloc(sizeof(*buff) * size, GFP_KERNEL);

@@ -55,10 +176,75 @@ static int aq_ptp_skb_ring_init(struct ptp_skb_ring *ring, unsigned int size)
	return 0;
}

static void aq_ptp_skb_ring_clean(struct ptp_skb_ring *ring)
{
	struct sk_buff *skb;

	while ((skb = aq_ptp_skb_get(ring)) != NULL)
		dev_kfree_skb_any(skb);
}

static void aq_ptp_skb_ring_release(struct ptp_skb_ring *ring)
{
	kfree(ring->buff);
	ring->buff = NULL;
	if (ring->buff) {
		aq_ptp_skb_ring_clean(ring);
		kfree(ring->buff);
		ring->buff = NULL;
	}
}

static void aq_ptp_tx_timeout_init(struct ptp_tx_timeout *timeout)
{
	spin_lock_init(&timeout->lock);
	timeout->active = false;
}

static void aq_ptp_tx_timeout_start(struct aq_ptp_s *aq_ptp)
{
	struct ptp_tx_timeout *timeout = &aq_ptp->ptp_tx_timeout;
	unsigned long flags;

	spin_lock_irqsave(&timeout->lock, flags);
	timeout->active = true;
	timeout->tx_start = jiffies;
	spin_unlock_irqrestore(&timeout->lock, flags);
}

static void aq_ptp_tx_timeout_update(struct aq_ptp_s *aq_ptp)
{
	if (!aq_ptp_skb_buf_len(&aq_ptp->skb_ring)) {
		struct ptp_tx_timeout *timeout = &aq_ptp->ptp_tx_timeout;
		unsigned long flags;

		spin_lock_irqsave(&timeout->lock, flags);
		timeout->active = false;
		spin_unlock_irqrestore(&timeout->lock, flags);
	}
}

static void aq_ptp_tx_timeout_check(struct aq_ptp_s *aq_ptp)
{
	struct ptp_tx_timeout *timeout = &aq_ptp->ptp_tx_timeout;
	unsigned long flags;
	bool timeout_flag;

	timeout_flag = false;

	spin_lock_irqsave(&timeout->lock, flags);
	if (timeout->active) {
		timeout_flag = time_is_before_jiffies(timeout->tx_start +
						      AQ_PTP_TX_TIMEOUT);
		/* reset active flag if timeout detected */
		if (timeout_flag)
			timeout->active = false;
	}
	spin_unlock_irqrestore(&timeout->lock, flags);

	if (timeout_flag) {
		aq_ptp_skb_ring_clean(&aq_ptp->skb_ring);
		netdev_err(aq_ptp->aq_nic->ndev,
			   "PTP Timeout. Clearing Tx Timestamp SKBs\n");
	}
}

/* aq_ptp_adjfine

@@ -148,6 +334,263 @@ static int aq_ptp_settime(struct ptp_clock_info *ptp,
	return 0;
}

static void aq_ptp_convert_to_hwtstamp(struct aq_ptp_s *aq_ptp,
				       struct skb_shared_hwtstamps *hwtstamp,
				       u64 timestamp)
{
	memset(hwtstamp, 0, sizeof(*hwtstamp));
	hwtstamp->hwtstamp = ns_to_ktime(timestamp);
}

/* aq_ptp_tx_hwtstamp - utility function which checks for TX time stamp
 * @adapter: the private adapter struct
 *
 * if the timestamp is valid, we convert it into the timecounter ns
 * value, then store that result into the hwtstamps structure which
 * is passed up the network stack
 */
void aq_ptp_tx_hwtstamp(struct aq_nic_s *aq_nic, u64 timestamp)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
	struct sk_buff *skb = aq_ptp_skb_get(&aq_ptp->skb_ring);
	struct skb_shared_hwtstamps hwtstamp;

	if (!skb) {
		netdev_err(aq_nic->ndev, "have timestamp but tx_queus empty\n");
		return;
	}

	timestamp += atomic_read(&aq_ptp->offset_egress);
	aq_ptp_convert_to_hwtstamp(aq_ptp, &hwtstamp, timestamp);
	skb_tstamp_tx(skb, &hwtstamp);
	dev_kfree_skb_any(skb);

	aq_ptp_tx_timeout_update(aq_ptp);
}

/* aq_ptp_rx_hwtstamp - utility function which checks for RX time stamp
 * @adapter: pointer to adapter struct
 * @skb: particular skb to send timestamp with
 *
 * if the timestamp is valid, we convert it into the timecounter ns
 * value, then store that result into the hwtstamps structure which
 * is passed up the network stack
 */
static void aq_ptp_rx_hwtstamp(struct aq_ptp_s *aq_ptp, struct sk_buff *skb,
			       u64 timestamp)
{
	timestamp -= atomic_read(&aq_ptp->offset_ingress);
	aq_ptp_convert_to_hwtstamp(aq_ptp, skb_hwtstamps(skb), timestamp);
}

bool aq_ptp_ring(struct aq_nic_s *aq_nic, struct aq_ring_s *ring)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;

	if (!aq_ptp)
		return false;

	return &aq_ptp->ptp_tx == ring ||
	       &aq_ptp->ptp_rx == ring || &aq_ptp->hwts_rx == ring;
}

u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct sk_buff *skb, u8 *p,
		      unsigned int len)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
	u64 timestamp = 0;
	u16 ret = aq_nic->aq_hw_ops->rx_extract_ts(aq_nic->aq_hw,
						   p, len, &timestamp);

	if (ret > 0)
		aq_ptp_rx_hwtstamp(aq_ptp, skb, timestamp);

	return ret;
}

static int aq_ptp_poll(struct napi_struct *napi, int budget)
{
	struct aq_ptp_s *aq_ptp = container_of(napi, struct aq_ptp_s, napi);
	struct aq_nic_s *aq_nic = aq_ptp->aq_nic;
	bool was_cleaned = false;
	int work_done = 0;
	int err;

	/* Processing PTP TX traffic */
	err = aq_nic->aq_hw_ops->hw_ring_tx_head_update(aq_nic->aq_hw,
							&aq_ptp->ptp_tx);
	if (err < 0)
		goto err_exit;

	if (aq_ptp->ptp_tx.sw_head != aq_ptp->ptp_tx.hw_head) {
		aq_ring_tx_clean(&aq_ptp->ptp_tx);

		was_cleaned = true;
	}

	/* Processing HW_TIMESTAMP RX traffic */
	err = aq_nic->aq_hw_ops->hw_ring_hwts_rx_receive(aq_nic->aq_hw,
							 &aq_ptp->hwts_rx);
	if (err < 0)
		goto err_exit;

	if (aq_ptp->hwts_rx.sw_head != aq_ptp->hwts_rx.hw_head) {
		aq_ring_hwts_rx_clean(&aq_ptp->hwts_rx, aq_nic);

		err = aq_nic->aq_hw_ops->hw_ring_hwts_rx_fill(aq_nic->aq_hw,
							      &aq_ptp->hwts_rx);

		was_cleaned = true;
	}

	/* Processing PTP RX traffic */
	err = aq_nic->aq_hw_ops->hw_ring_rx_receive(aq_nic->aq_hw,
						    &aq_ptp->ptp_rx);
	if (err < 0)
		goto err_exit;

	if (aq_ptp->ptp_rx.sw_head != aq_ptp->ptp_rx.hw_head) {
		unsigned int sw_tail_old;

		err = aq_ring_rx_clean(&aq_ptp->ptp_rx, napi, &work_done, budget);
		if (err < 0)
			goto err_exit;

		sw_tail_old = aq_ptp->ptp_rx.sw_tail;
		err = aq_ring_rx_fill(&aq_ptp->ptp_rx);
		if (err < 0)
			goto err_exit;

		err = aq_nic->aq_hw_ops->hw_ring_rx_fill(aq_nic->aq_hw,
							 &aq_ptp->ptp_rx,
							 sw_tail_old);
		if (err < 0)
			goto err_exit;
	}

	if (was_cleaned)
		work_done = budget;

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		aq_nic->aq_hw_ops->hw_irq_enable(aq_nic->aq_hw,
					1 << aq_ptp->ptp_ring_param.vec_idx);
	}

err_exit:
	return work_done;
}

static irqreturn_t aq_ptp_isr(int irq, void *private)
{
	struct aq_ptp_s *aq_ptp = private;
	int err = 0;

	if (!aq_ptp) {
		err = -EINVAL;
		goto err_exit;
	}
	napi_schedule(&aq_ptp->napi);

err_exit:
	return err >= 0 ? IRQ_HANDLED : IRQ_NONE;
}

int aq_ptp_xmit(struct aq_nic_s *aq_nic, struct sk_buff *skb)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
	struct aq_ring_s *ring = &aq_ptp->ptp_tx;
	unsigned long irq_flags;
	int err = NETDEV_TX_OK;
	unsigned int frags;

	if (skb->len <= 0) {
		dev_kfree_skb_any(skb);
		goto err_exit;
	}

	frags = skb_shinfo(skb)->nr_frags + 1;
	/* Frags cannot be bigger 16KB
	 * because PTP usually works
	 * without Jumbo even in a background
	 */
	if (frags > AQ_CFG_SKB_FRAGS_MAX || frags > aq_ring_avail_dx(ring)) {
		/* Drop packet because it doesn't make sence to delay it */
		dev_kfree_skb_any(skb);
		goto err_exit;
	}

	err = aq_ptp_skb_put(&aq_ptp->skb_ring, skb);
	if (err) {
		netdev_err(aq_nic->ndev, "SKB Ring is overflow (%u)!\n",
			   ring->size);
		return NETDEV_TX_BUSY;
	}
	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
	aq_ptp_tx_timeout_start(aq_ptp);
	skb_tx_timestamp(skb);

	spin_lock_irqsave(&aq_nic->aq_ptp->ptp_ring_lock, irq_flags);
	frags = aq_nic_map_skb(aq_nic, skb, ring);

	if (likely(frags)) {
		err = aq_nic->aq_hw_ops->hw_ring_tx_xmit(aq_nic->aq_hw,
							 ring, frags);
		if (err >= 0) {
			++ring->stats.tx.packets;
			ring->stats.tx.bytes += skb->len;
		}
	} else {
		err = NETDEV_TX_BUSY;
	}
	spin_unlock_irqrestore(&aq_nic->aq_ptp->ptp_ring_lock, irq_flags);

err_exit:
	return err;
}

void aq_ptp_service_task(struct aq_nic_s *aq_nic)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;

	if (!aq_ptp)
		return;

	aq_ptp_tx_timeout_check(aq_ptp);
}

int aq_ptp_irq_alloc(struct aq_nic_s *aq_nic)
{
	struct pci_dev *pdev = aq_nic->pdev;
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
	int err = 0;

	if (!aq_ptp)
		return 0;

	if (pdev->msix_enabled || pdev->msi_enabled) {
		err = request_irq(pci_irq_vector(pdev, aq_ptp->idx_vector),
				  aq_ptp_isr, 0, aq_nic->ndev->name, aq_ptp);
	} else {
		err = -EINVAL;
		goto err_exit;
	}

err_exit:
	return err;
}

void aq_ptp_irq_free(struct aq_nic_s *aq_nic)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;
	struct pci_dev *pdev = aq_nic->pdev;

	if (!aq_ptp)
		return;

	free_irq(pci_irq_vector(pdev, aq_ptp->idx_vector), aq_ptp);
}

int aq_ptp_ring_init(struct aq_nic_s *aq_nic)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;

@@ -189,6 +632,12 @@ int aq_ptp_ring_init(struct aq_nic_s *aq_nic)
	err = aq_nic->aq_hw_ops->hw_ring_rx_init(aq_nic->aq_hw,
						 &aq_ptp->hwts_rx,
						 &aq_ptp->ptp_ring_param);
	if (err < 0)
		goto err_exit;
	err = aq_nic->aq_hw_ops->hw_ring_hwts_rx_fill(aq_nic->aq_hw,
						      &aq_ptp->hwts_rx);
	if (err < 0)
		goto err_exit;

	return err;

@@ -219,6 +668,8 @@ int aq_ptp_ring_start(struct aq_nic_s *aq_nic)
	if (err < 0)
		goto err_exit;

	napi_enable(&aq_ptp->napi);

err_exit:
	return err;
}

@@ -234,6 +685,8 @@ void aq_ptp_ring_stop(struct aq_nic_s *aq_nic)
	aq_nic->aq_hw_ops->hw_ring_rx_stop(aq_nic->aq_hw, &aq_ptp->ptp_rx);

	aq_nic->aq_hw_ops->hw_ring_rx_stop(aq_nic->aq_hw, &aq_ptp->hwts_rx);

	napi_disable(&aq_ptp->napi);
}

void aq_ptp_ring_deinit(struct aq_nic_s *aq_nic)

@@ -306,6 +759,12 @@ int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic)
		goto err_exit_hwts_rx;
	}

	aq_ptp->ptp_ring_param.vec_idx = aq_ptp->idx_vector;
	aq_ptp->ptp_ring_param.cpu = aq_ptp->ptp_ring_param.vec_idx +
			aq_nic_get_cfg(aq_nic)->aq_rss.base_cpu_number;
	cpumask_set_cpu(aq_ptp->ptp_ring_param.cpu,
			&aq_ptp->ptp_ring_param.affinity_mask);

	return 0;

err_exit_hwts_rx:

@@ -347,6 +806,60 @@ static struct ptp_clock_info aq_ptp_clock = {
	.pin_config = NULL,
};

#define ptp_offset_init(__idx, __mbps, __egress, __ingress) do { \
		ptp_offset[__idx].mbps = (__mbps); \
		ptp_offset[__idx].egress = (__egress); \
		ptp_offset[__idx].ingress = (__ingress); } \
		while (0)

static void aq_ptp_offset_init_from_fw(const struct hw_aq_ptp_offset *offsets)
{
	int i;

	/* Load offsets for PTP */
	for (i = 0; i < ARRAY_SIZE(ptp_offset); i++) {
		switch (i) {
		/* 100M */
		case ptp_offset_idx_100:
			ptp_offset_init(i, 100,
					offsets->egress_100,
					offsets->ingress_100);
			break;
		/* 1G */
		case ptp_offset_idx_1000:
			ptp_offset_init(i, 1000,
					offsets->egress_1000,
					offsets->ingress_1000);
			break;
		/* 2.5G */
		case ptp_offset_idx_2500:
			ptp_offset_init(i, 2500,
					offsets->egress_2500,
					offsets->ingress_2500);
			break;
		/* 5G */
		case ptp_offset_idx_5000:
			ptp_offset_init(i, 5000,
					offsets->egress_5000,
					offsets->ingress_5000);
			break;
		/* 10G */
		case ptp_offset_idx_10000:
			ptp_offset_init(i, 10000,
					offsets->egress_10000,
					offsets->ingress_10000);
			break;
		}
	}
}

static void aq_ptp_offset_init(const struct hw_aq_ptp_offset *offsets)
{
	memset(ptp_offset, 0, sizeof(ptp_offset));

	aq_ptp_offset_init_from_fw(offsets);
}

void aq_ptp_clock_init(struct aq_nic_s *aq_nic)
{
	struct aq_ptp_s *aq_ptp = aq_nic->aq_ptp;

@@ -380,6 +893,8 @@ int aq_ptp_init(struct aq_nic_s *aq_nic, unsigned int idx_vec)
		return 0;
	}

	aq_ptp_offset_init(&mbox.info.ptp_offset);

	aq_ptp = kzalloc(sizeof(*aq_ptp), GFP_KERNEL);
	if (!aq_ptp) {
		err = -ENOMEM;

@@ -399,6 +914,15 @@ int aq_ptp_init(struct aq_nic_s *aq_nic, unsigned int idx_vec)
		goto err_exit;
	}
	aq_ptp->ptp_clock = clock;
	aq_ptp_tx_timeout_init(&aq_ptp->ptp_tx_timeout);

	atomic_set(&aq_ptp->offset_egress, 0);
	atomic_set(&aq_ptp->offset_ingress, 0);

	netif_napi_add(aq_nic_get_ndev(aq_nic), &aq_ptp->napi,
		       aq_ptp_poll, AQ_CFG_NAPI_WEIGHT);

	aq_ptp->idx_vector = idx_vec;

	aq_nic->aq_ptp = aq_ptp;

@@ -439,6 +963,12 @@ void aq_ptp_free(struct aq_nic_s *aq_nic)
	aq_nic->aq_fw_ops->enable_ptp(aq_nic->aq_hw, 0);
	mutex_unlock(&aq_nic->fwreq_mutex);

	netif_napi_del(&aq_ptp->napi);
	kfree(aq_ptp);
	aq_nic->aq_ptp = NULL;
}

struct ptp_clock *aq_ptp_get_ptp_clock(struct aq_ptp_s *aq_ptp)
{
	return aq_ptp->ptp_clock;
}

@@ -17,6 +17,9 @@ int aq_ptp_init(struct aq_nic_s *aq_nic, unsigned int idx_vec);
void aq_ptp_unregister(struct aq_nic_s *aq_nic);
void aq_ptp_free(struct aq_nic_s *aq_nic);

int aq_ptp_irq_alloc(struct aq_nic_s *aq_nic);
void aq_ptp_irq_free(struct aq_nic_s *aq_nic);

int aq_ptp_ring_alloc(struct aq_nic_s *aq_nic);
void aq_ptp_ring_free(struct aq_nic_s *aq_nic);

@@ -25,6 +28,22 @@ int aq_ptp_ring_start(struct aq_nic_s *aq_nic);
void aq_ptp_ring_stop(struct aq_nic_s *aq_nic);
void aq_ptp_ring_deinit(struct aq_nic_s *aq_nic);

void aq_ptp_service_task(struct aq_nic_s *aq_nic);

void aq_ptp_tm_offset_set(struct aq_nic_s *aq_nic, unsigned int mbps);

void aq_ptp_clock_init(struct aq_nic_s *aq_nic);

/* Traffic processing functions */
int aq_ptp_xmit(struct aq_nic_s *aq_nic, struct sk_buff *skb);
void aq_ptp_tx_hwtstamp(struct aq_nic_s *aq_nic, u64 timestamp);

/* Return either ring is belong to PTP or not*/
bool aq_ptp_ring(struct aq_nic_s *aq_nic, struct aq_ring_s *ring);

u16 aq_ptp_extract_ts(struct aq_nic_s *aq_nic, struct sk_buff *skb, u8 *p,
		      unsigned int len);

struct ptp_clock *aq_ptp_get_ptp_clock(struct aq_ptp_s *aq_ptp);

#endif /* AQ_PTP_H */

@@ -10,6 +10,7 @@
#include "aq_nic.h"
#include "aq_hw.h"
#include "aq_hw_utils.h"
#include "aq_ptp.h"

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

@@ -314,6 +315,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
	     self->sw_head = aq_ring_next_dx(self, self->sw_head),
	     --budget, ++(*work_done)) {
		struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
		bool is_ptp_ring = aq_ptp_ring(self->aq_nic, self);
		struct aq_ring_buff_s *buff_ = NULL;
		struct sk_buff *skb = NULL;
		unsigned int next_ = 0U;

@@ -378,6 +380,11 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
				err = -ENOMEM;
				goto err_exit;
			}
			if (is_ptp_ring)
				buff->len -=
					aq_ptp_extract_ts(self->aq_nic, skb,
						aq_buf_vaddr(&buff->rxdata),
						buff->len);
			skb_put(skb, buff->len);
			page_ref_inc(buff->rxdata.page);
		} else {

@@ -386,6 +393,11 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
				err = -ENOMEM;
				goto err_exit;
			}
			if (is_ptp_ring)
				buff->len -=
					aq_ptp_extract_ts(self->aq_nic, skb,
						aq_buf_vaddr(&buff->rxdata),
						buff->len);

			hdr_len = buff->len;
			if (hdr_len > AQ_CFG_RX_HDR_SIZE)

@@ -445,8 +457,8 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
		skb_set_hash(skb, buff->rss_hash,
			     buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
			     PKT_HASH_TYPE_NONE);

		skb_record_rx_queue(skb, self->idx);
		/* Send all PTP traffic to 0 queue */
		skb_record_rx_queue(skb, is_ptp_ring ? 0 : self->idx);

		++self->stats.rx.packets;
		self->stats.rx.bytes += skb->len;

@@ -458,6 +470,21 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
	return err;
}

void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic)
{
	while (self->sw_head != self->hw_head) {
		u64 ns;

		aq_nic->aq_hw_ops->extract_hwts(aq_nic->aq_hw,
						self->dx_ring +
						(self->sw_head * self->dx_size),
						self->dx_size, &ns);
		aq_ptp_tx_hwtstamp(aq_nic, ns);

		self->sw_head = aq_ring_next_dx(self, self->sw_head);
	}
}

int aq_ring_rx_fill(struct aq_ring_s *self)
{
	unsigned int page_order = self->page_order;

@@ -177,5 +177,6 @@ int aq_ring_rx_fill(struct aq_ring_s *self);
struct aq_ring_s *aq_ring_hwts_rx_alloc(struct aq_ring_s *self,
		struct aq_nic_s *aq_nic, unsigned int idx,
		unsigned int size, unsigned int dx_size);
void aq_ring_hwts_rx_clean(struct aq_ring_s *self, struct aq_nic_s *aq_nic);

#endif /* AQ_RING_H */

@@ -682,6 +682,46 @@ static int hw_atl_b0_hw_ring_rx_fill(struct aq_hw_s *self,
	return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_ring_hwts_rx_fill(struct aq_hw_s *self,
					   struct aq_ring_s *ring)
{
	unsigned int i;

	for (i = aq_ring_avail_dx(ring); i--;
	     ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail)) {
		struct hw_atl_rxd_s *rxd =
			(struct hw_atl_rxd_s *)
			&ring->dx_ring[ring->sw_tail * HW_ATL_B0_RXD_SIZE];

		rxd->buf_addr = ring->dx_ring_pa + ring->size * ring->dx_size;
		rxd->hdr_addr = 0U;
	}
	/* Make sure descriptors are updated before bump tail*/
	wmb();

	hw_atl_reg_rx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);

	return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_ring_hwts_rx_receive(struct aq_hw_s *self,
					     struct aq_ring_s *ring)
{
	while (ring->hw_head != ring->sw_tail) {
		struct hw_atl_rxd_hwts_wb_s *hwts_wb =
			(struct hw_atl_rxd_hwts_wb_s *)
			(ring->dx_ring + (ring->hw_head * HW_ATL_B0_RXD_SIZE));

		/* RxD is not done */
		if (!(hwts_wb->sec_lw0 & 0x1U))
			break;

		ring->hw_head = aq_ring_next_dx(ring, ring->hw_head);
	}

	return aq_hw_err_from_flags(self);
}

static int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
					    struct aq_ring_s *ring)
{

@@ -1133,6 +1173,61 @@ static int hw_atl_b0_adj_clock_freq(struct aq_hw_s *self, s32 ppb)
	return self->aq_fw_ops->send_fw_request(self, &fwreq, size);
}

static u16 hw_atl_b0_rx_extract_ts(struct aq_hw_s *self, u8 *p,
				   unsigned int len, u64 *timestamp)
{
	unsigned int offset = 14;
	struct ethhdr *eth;
	u64 sec;
	u8 *ptr;
	u32 ns;

	if (len <= offset || !timestamp)
		return 0;

	/* The TIMESTAMP in the end of package has following format:
	 * (big-endian)
	 *   struct {
	 *     uint64_t sec;
	 *     uint32_t ns;
	 *     uint16_t stream_id;
	 *   };
	 */
	ptr = p + (len - offset);
	memcpy(&sec, ptr, sizeof(sec));
	ptr += sizeof(sec);
	memcpy(&ns, ptr, sizeof(ns));

	sec = be64_to_cpu(sec) & 0xffffffffffffllu;
	ns = be32_to_cpu(ns);
	*timestamp = sec * NSEC_PER_SEC + ns + self->ptp_clk_offset;

	eth = (struct ethhdr *)p;

	return (eth->h_proto == htons(ETH_P_1588)) ? 12 : 14;
}

static int hw_atl_b0_extract_hwts(struct aq_hw_s *self, u8 *p, unsigned int len,
				  u64 *timestamp)
{
	struct hw_atl_rxd_hwts_wb_s *hwts_wb = (struct hw_atl_rxd_hwts_wb_s *)p;
	u64 tmp, sec, ns;

	sec = 0;
	tmp = (hwts_wb->sec_lw0 >> 2) & 0x3ff;
	sec += tmp;
	tmp = (u64)((hwts_wb->sec_lw1 >> 16) & 0xffff) << 10;
	sec += tmp;
	tmp = (u64)(hwts_wb->sec_hw & 0xfff) << 26;
	sec += tmp;
	tmp = (u64)((hwts_wb->sec_hw >> 22) & 0x3ff) << 38;
	sec += tmp;
	ns = sec * NSEC_PER_SEC + hwts_wb->ns;
	if (timestamp)
		*timestamp = ns + self->ptp_clk_offset;
	return 0;
}

static int hw_atl_b0_hw_fl3l4_clear(struct aq_hw_s *self,
				    struct aq_rx_filter_l3l4 *data)
{

@@ -1309,11 +1404,16 @@ const struct aq_hw_ops hw_atl_ops_b0 = {
	.hw_tx_tc_mode_get = hw_atl_b0_tx_tc_mode_get,
	.hw_rx_tc_mode_get = hw_atl_b0_rx_tc_mode_get,

	.hw_ring_hwts_rx_fill = hw_atl_b0_hw_ring_hwts_rx_fill,
	.hw_ring_hwts_rx_receive = hw_atl_b0_hw_ring_hwts_rx_receive,

	.hw_get_ptp_ts = hw_atl_b0_get_ptp_ts,
	.hw_adj_sys_clock = hw_atl_b0_adj_sys_clock,
	.hw_set_sys_clock = hw_atl_b0_set_sys_clock,
	.hw_adj_clock_freq = hw_atl_b0_adj_clock_freq,

	.rx_extract_ts = hw_atl_b0_rx_extract_ts,
	.extract_hwts = hw_atl_b0_extract_hwts,
	.hw_set_offload = hw_atl_b0_hw_offload_set,
	.hw_set_fc = hw_atl_b0_set_fc,
};