mirror of https://gitee.com/openkylin/linux.git
Merge branch 'DPAA-Ethernet-fixes'
Madalin Bucur says: ==================== DPAA Ethernet fixes Fixed an issue on the Tx path that was visible in netperf TCP_SENDFILE tests. Addressed another issue with Rx errors not being always counted. Adding control for allmulti. v2: rephrased commit message, reduced changes in the SG mapping fix ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
commit
b17db8c0a1
|
@ -454,6 +454,16 @@ static void dpaa_set_rx_mode(struct net_device *net_dev)
|
|||
err);
|
||||
}
|
||||
|
||||
if (!!(net_dev->flags & IFF_ALLMULTI) != priv->mac_dev->allmulti) {
|
||||
priv->mac_dev->allmulti = !priv->mac_dev->allmulti;
|
||||
err = priv->mac_dev->set_allmulti(priv->mac_dev->fman_mac,
|
||||
priv->mac_dev->allmulti);
|
||||
if (err < 0)
|
||||
netif_err(priv, drv, net_dev,
|
||||
"mac_dev->set_allmulti() = %d\n",
|
||||
err);
|
||||
}
|
||||
|
||||
err = priv->mac_dev->set_multi(net_dev, priv->mac_dev);
|
||||
if (err < 0)
|
||||
netif_err(priv, drv, net_dev, "mac_dev->set_multi() = %d\n",
|
||||
|
@ -1916,8 +1926,10 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
|
|||
goto csum_failed;
|
||||
}
|
||||
|
||||
/* SGT[0] is used by the linear part */
|
||||
sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom);
|
||||
qm_sg_entry_set_len(&sgt[0], skb_headlen(skb));
|
||||
frag_len = skb_headlen(skb);
|
||||
qm_sg_entry_set_len(&sgt[0], frag_len);
|
||||
sgt[0].bpid = FSL_DPAA_BPID_INV;
|
||||
sgt[0].offset = 0;
|
||||
addr = dma_map_single(dev, skb->data,
|
||||
|
@ -1930,9 +1942,9 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
|
|||
qm_sg_entry_set64(&sgt[0], addr);
|
||||
|
||||
/* populate the rest of SGT entries */
|
||||
frag = &skb_shinfo(skb)->frags[0];
|
||||
frag_len = frag->size;
|
||||
for (i = 1; i <= nr_frags; i++, frag++) {
|
||||
for (i = 0; i < nr_frags; i++) {
|
||||
frag = &skb_shinfo(skb)->frags[i];
|
||||
frag_len = frag->size;
|
||||
WARN_ON(!skb_frag_page(frag));
|
||||
addr = skb_frag_dma_map(dev, frag, 0,
|
||||
frag_len, dma_dir);
|
||||
|
@ -1942,15 +1954,16 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
|
|||
goto sg_map_failed;
|
||||
}
|
||||
|
||||
qm_sg_entry_set_len(&sgt[i], frag_len);
|
||||
sgt[i].bpid = FSL_DPAA_BPID_INV;
|
||||
sgt[i].offset = 0;
|
||||
qm_sg_entry_set_len(&sgt[i + 1], frag_len);
|
||||
sgt[i + 1].bpid = FSL_DPAA_BPID_INV;
|
||||
sgt[i + 1].offset = 0;
|
||||
|
||||
/* keep the offset in the address */
|
||||
qm_sg_entry_set64(&sgt[i], addr);
|
||||
frag_len = frag->size;
|
||||
qm_sg_entry_set64(&sgt[i + 1], addr);
|
||||
}
|
||||
qm_sg_entry_set_f(&sgt[i - 1], frag_len);
|
||||
|
||||
/* Set the final bit in the last used entry of the SGT */
|
||||
qm_sg_entry_set_f(&sgt[nr_frags], frag_len);
|
||||
|
||||
qm_fd_set_sg(fd, priv->tx_headroom, skb->len);
|
||||
|
||||
|
@ -2052,19 +2065,23 @@ static int dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
|
|||
/* MAX_SKB_FRAGS is equal or larger than our dpaa_SGT_MAX_ENTRIES;
|
||||
* make sure we don't feed FMan with more fragments than it supports.
|
||||
*/
|
||||
if (nonlinear &&
|
||||
likely(skb_shinfo(skb)->nr_frags < DPAA_SGT_MAX_ENTRIES)) {
|
||||
if (unlikely(nonlinear &&
|
||||
(skb_shinfo(skb)->nr_frags >= DPAA_SGT_MAX_ENTRIES))) {
|
||||
/* If the egress skb contains more fragments than we support
|
||||
* we have no choice but to linearize it ourselves.
|
||||
*/
|
||||
if (__skb_linearize(skb))
|
||||
goto enomem;
|
||||
|
||||
nonlinear = skb_is_nonlinear(skb);
|
||||
}
|
||||
|
||||
if (nonlinear) {
|
||||
/* Just create a S/G fd based on the skb */
|
||||
err = skb_to_sg_fd(priv, skb, &fd);
|
||||
percpu_priv->tx_frag_skbuffs++;
|
||||
} else {
|
||||
/* If the egress skb contains more fragments than we support
|
||||
* we have no choice but to linearize it ourselves.
|
||||
*/
|
||||
if (unlikely(nonlinear) && __skb_linearize(skb))
|
||||
goto enomem;
|
||||
|
||||
/* Finally, create a contig FD from this skb */
|
||||
/* Create a contig FD from this skb */
|
||||
err = skb_to_contig_fd(priv, skb, &fd, &offset);
|
||||
}
|
||||
if (unlikely(err < 0))
|
||||
|
@ -2201,14 +2218,8 @@ static enum qman_cb_dqrr_result rx_error_dqrr(struct qman_portal *portal,
|
|||
if (dpaa_eth_napi_schedule(percpu_priv, portal))
|
||||
return qman_cb_dqrr_stop;
|
||||
|
||||
if (dpaa_eth_refill_bpools(priv))
|
||||
/* Unable to refill the buffer pool due to insufficient
|
||||
* system memory. Just release the frame back into the pool,
|
||||
* otherwise we'll soon end up with an empty buffer pool.
|
||||
*/
|
||||
dpaa_fd_release(net_dev, &dq->fd);
|
||||
else
|
||||
dpaa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
|
||||
dpaa_eth_refill_bpools(priv);
|
||||
dpaa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
|
||||
|
||||
return qman_cb_dqrr_consume;
|
||||
}
|
||||
|
|
|
@ -1117,6 +1117,25 @@ int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/**
 * dtsec_set_allmulti - enable/disable reception of all multicast frames
 * @dtsec:  dTSEC MAC context
 * @enable: true to accept all multicast frames, false to filter normally
 *
 * Toggles the RCTRL_MPROM (multicast promiscuous) bit in the dTSEC RCTRL
 * register via a read-modify-write of the memory-mapped register.
 *
 * Return: 0 on success, -EINVAL if the MAC has not finished initialization.
 */
int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable)
{
	u32 tmp;
	struct dtsec_regs __iomem *regs = dtsec->regs;

	if (!is_init_done(dtsec->dtsec_drv_param))
		return -EINVAL;

	/* Fixed mojibake from the diff rendering: "&regs" had been
	 * mangled into "(R)s" by HTML-entity decoding.
	 */
	tmp = ioread32be(&regs->rctrl);
	if (enable)
		tmp |= RCTRL_MPROM;
	else
		tmp &= ~RCTRL_MPROM;

	iowrite32be(tmp, &regs->rctrl);

	return 0;
}
|
||||
|
||||
int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
|
||||
{
|
||||
struct dtsec_regs __iomem *regs = dtsec->regs;
|
||||
|
|
|
@ -55,5 +55,6 @@ int dtsec_set_exception(struct fman_mac *dtsec,
|
|||
int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr);
|
||||
int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr);
|
||||
int dtsec_get_version(struct fman_mac *dtsec, u32 *mac_version);
|
||||
int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable);
|
||||
|
||||
#endif /* __DTSEC_H */
|
||||
|
|
|
@ -350,6 +350,7 @@ struct fman_mac {
|
|||
struct fman_rev_info fm_rev_info;
|
||||
bool basex_if;
|
||||
struct phy_device *pcsphy;
|
||||
bool allmulti_enabled;
|
||||
};
|
||||
|
||||
static void add_addr_in_paddr(struct memac_regs __iomem *regs, u8 *adr,
|
||||
|
@ -940,6 +941,29 @@ int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/**
 * memac_set_allmulti - enable/disable reception of all multicast frames
 * @memac:  mEMAC MAC context
 * @enable: true to accept all multicast frames, false to filter normally
 *
 * When enabling, sets the MCAST_EN bit for every entry of the multicast
 * hash table so every multicast hash bucket matches; when disabling,
 * clears that bit for every entry. Records the resulting state in
 * memac->allmulti_enabled so del_hash_mac_address() knows not to clear
 * buckets while allmulti is active.
 *
 * Return: 0 on success, -EINVAL if the MAC has not finished initialization.
 */
int memac_set_allmulti(struct fman_mac *memac, bool enable)
{
	u32 entry;
	struct memac_regs __iomem *regs = memac->regs;

	if (!is_init_done(memac->memac_drv_param))
		return -EINVAL;

	/* Fixed mojibake from the diff rendering: "&regs" had been
	 * mangled into "(R)s" by HTML-entity decoding.
	 */
	if (enable) {
		for (entry = 0; entry < HASH_TABLE_SIZE; entry++)
			iowrite32be(entry | HASH_CTRL_MCAST_EN,
				    &regs->hashtable_ctrl);
	} else {
		for (entry = 0; entry < HASH_TABLE_SIZE; entry++)
			iowrite32be(entry & ~HASH_CTRL_MCAST_EN,
				    &regs->hashtable_ctrl);
	}

	memac->allmulti_enabled = enable;

	return 0;
}
|
||||
|
||||
int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
|
||||
{
|
||||
struct memac_regs __iomem *regs = memac->regs;
|
||||
|
@ -963,8 +987,12 @@ int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
|
|||
break;
|
||||
}
|
||||
}
|
||||
if (list_empty(&memac->multicast_addr_hash->lsts[hash]))
|
||||
iowrite32be(hash & ~HASH_CTRL_MCAST_EN, ®s->hashtable_ctrl);
|
||||
|
||||
if (!memac->allmulti_enabled) {
|
||||
if (list_empty(&memac->multicast_addr_hash->lsts[hash]))
|
||||
iowrite32be(hash & ~HASH_CTRL_MCAST_EN,
|
||||
®s->hashtable_ctrl);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -57,5 +57,6 @@ int memac_set_exception(struct fman_mac *memac,
|
|||
enum fman_mac_exceptions exception, bool enable);
|
||||
int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr);
|
||||
int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr);
|
||||
int memac_set_allmulti(struct fman_mac *memac, bool enable);
|
||||
|
||||
#endif /* __MEMAC_H */
|
||||
|
|
|
@ -217,6 +217,7 @@ struct fman_mac {
|
|||
struct tgec_cfg *cfg;
|
||||
void *fm;
|
||||
struct fman_rev_info fm_rev_info;
|
||||
bool allmulti_enabled;
|
||||
};
|
||||
|
||||
static void set_mac_address(struct tgec_regs __iomem *regs, u8 *adr)
|
||||
|
@ -564,6 +565,29 @@ int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/**
 * tgec_set_allmulti - enable/disable reception of all multicast frames
 * @tgec:   10G Ethernet (TGEC) MAC context
 * @enable: true to accept all multicast frames, false to filter normally
 *
 * When enabling, sets TGEC_HASH_MCAST_EN for every entry of the multicast
 * hash table so every multicast hash bucket matches; when disabling,
 * clears that bit for every entry. Records the resulting state in
 * tgec->allmulti_enabled so del_hash_mac_address() knows not to clear
 * buckets while allmulti is active.
 *
 * Return: 0 on success, -EINVAL if the MAC has not finished initialization.
 */
int tgec_set_allmulti(struct fman_mac *tgec, bool enable)
{
	u32 entry;
	struct tgec_regs __iomem *regs = tgec->regs;

	if (!is_init_done(tgec->cfg))
		return -EINVAL;

	/* Fixed mojibake from the diff rendering: "&regs" had been
	 * mangled into "(R)s" by HTML-entity decoding.
	 */
	if (enable) {
		for (entry = 0; entry < TGEC_HASH_TABLE_SIZE; entry++)
			iowrite32be(entry | TGEC_HASH_MCAST_EN,
				    &regs->hashtable_ctrl);
	} else {
		for (entry = 0; entry < TGEC_HASH_TABLE_SIZE; entry++)
			iowrite32be(entry & ~TGEC_HASH_MCAST_EN,
				    &regs->hashtable_ctrl);
	}

	tgec->allmulti_enabled = enable;

	return 0;
}
|
||||
|
||||
int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
|
||||
{
|
||||
struct tgec_regs __iomem *regs = tgec->regs;
|
||||
|
@ -591,9 +615,12 @@ int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
|
|||
break;
|
||||
}
|
||||
}
|
||||
if (list_empty(&tgec->multicast_addr_hash->lsts[hash]))
|
||||
iowrite32be((hash & ~TGEC_HASH_MCAST_EN),
|
||||
®s->hashtable_ctrl);
|
||||
|
||||
if (!tgec->allmulti_enabled) {
|
||||
if (list_empty(&tgec->multicast_addr_hash->lsts[hash]))
|
||||
iowrite32be((hash & ~TGEC_HASH_MCAST_EN),
|
||||
®s->hashtable_ctrl);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -51,5 +51,6 @@ int tgec_set_exception(struct fman_mac *tgec,
|
|||
int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr);
|
||||
int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr);
|
||||
int tgec_get_version(struct fman_mac *tgec, u32 *mac_version);
|
||||
int tgec_set_allmulti(struct fman_mac *tgec, bool enable);
|
||||
|
||||
#endif /* __TGEC_H */
|
||||
|
|
|
@ -470,6 +470,7 @@ static void setup_dtsec(struct mac_device *mac_dev)
|
|||
mac_dev->set_tx_pause = dtsec_set_tx_pause_frames;
|
||||
mac_dev->set_rx_pause = dtsec_accept_rx_pause_frames;
|
||||
mac_dev->set_exception = dtsec_set_exception;
|
||||
mac_dev->set_allmulti = dtsec_set_allmulti;
|
||||
mac_dev->set_multi = set_multi;
|
||||
mac_dev->start = start;
|
||||
mac_dev->stop = stop;
|
||||
|
@ -488,6 +489,7 @@ static void setup_tgec(struct mac_device *mac_dev)
|
|||
mac_dev->set_tx_pause = tgec_set_tx_pause_frames;
|
||||
mac_dev->set_rx_pause = tgec_accept_rx_pause_frames;
|
||||
mac_dev->set_exception = tgec_set_exception;
|
||||
mac_dev->set_allmulti = tgec_set_allmulti;
|
||||
mac_dev->set_multi = set_multi;
|
||||
mac_dev->start = start;
|
||||
mac_dev->stop = stop;
|
||||
|
@ -506,6 +508,7 @@ static void setup_memac(struct mac_device *mac_dev)
|
|||
mac_dev->set_tx_pause = memac_set_tx_pause_frames;
|
||||
mac_dev->set_rx_pause = memac_accept_rx_pause_frames;
|
||||
mac_dev->set_exception = memac_set_exception;
|
||||
mac_dev->set_allmulti = memac_set_allmulti;
|
||||
mac_dev->set_multi = set_multi;
|
||||
mac_dev->start = start;
|
||||
mac_dev->stop = stop;
|
||||
|
|
|
@ -59,6 +59,7 @@ struct mac_device {
|
|||
bool rx_pause_active;
|
||||
bool tx_pause_active;
|
||||
bool promisc;
|
||||
bool allmulti;
|
||||
|
||||
int (*init)(struct mac_device *mac_dev);
|
||||
int (*start)(struct mac_device *mac_dev);
|
||||
|
@ -66,6 +67,7 @@ struct mac_device {
|
|||
void (*adjust_link)(struct mac_device *mac_dev);
|
||||
int (*set_promisc)(struct fman_mac *mac_dev, bool enable);
|
||||
int (*change_addr)(struct fman_mac *mac_dev, enet_addr_t *enet_addr);
|
||||
int (*set_allmulti)(struct fman_mac *mac_dev, bool enable);
|
||||
int (*set_multi)(struct net_device *net_dev,
|
||||
struct mac_device *mac_dev);
|
||||
int (*set_rx_pause)(struct fman_mac *mac_dev, bool en);
|
||||
|
|
Loading…
Reference in New Issue