Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next

Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates

This series contains updates to i40e only.

Jesse provides 6 patches against i40e.  First is a patch to reduce
CPU utilization by reducing read-flush to read in the hot path.  Next
couple of patches resolve coverity issues reported by Hannes Frederic
Sowa <hannes@stressinduktion.org>.  Then Jesse refactored i40e to cleanup
functions which used cpu_to_xxx(foo) which caused a lot of line wrapping.

Mitch provides 2 i40e patches.  First fixes a panic when tx_rings[0]
is not allocated; his second patch corrects a math error when
assigning MSI-X vectors to VFs.  The vectors-per-vf value reported
by the hardware already conveniently reports one less than the actual
value.

Shannon provides 5 patches against i40e.  His first patch corrects a
number of little bugs in the error handling of irq setup, most of
which ended up panicking the kernel.  Next he fixes the overactive
IRQ issue seen in testing and allows the use of the legacy interrupt.
Shannon then provides a cleanup of the arguments declared at the
beginning of each function.  Then he provides a patch to make sure
that there are really rings and queues before trying to dump
information in them.  Lastly he simplifies the code by using an
already existing variable.

Catherine provides an i40e patch to bump the version.

v2:
 - Remove unneeded parenthesis in patch 3 based on feedback from
   Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
 - Fix patch description for patch 11 based on feedback from
   Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
David S. Miller 2013-10-22 15:52:06 -04:00
commit 666d100859
5 changed files with 147 additions and 128 deletions

View File

@ -545,6 +545,7 @@ static inline void i40e_dbg_init(void) {}
static inline void i40e_dbg_exit(void) {} static inline void i40e_dbg_exit(void) {}
#endif /* CONFIG_DEBUG_FS*/ #endif /* CONFIG_DEBUG_FS*/
void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector); void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector);
void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf);
int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
void i40e_vlan_stripping_disable(struct i40e_vsi *vsi); void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid); int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);

View File

@ -151,9 +151,7 @@ static ssize_t i40e_dbg_dump_write(struct file *filp,
size_t count, loff_t *ppos) size_t count, loff_t *ppos)
{ {
struct i40e_pf *pf = filp->private_data; struct i40e_pf *pf = filp->private_data;
char dump_request_buf[16];
bool seid_found = false; bool seid_found = false;
int bytes_not_copied;
long seid = -1; long seid = -1;
int buflen = 0; int buflen = 0;
int i, ret; int i, ret;
@ -163,21 +161,12 @@ static ssize_t i40e_dbg_dump_write(struct file *filp,
/* don't allow partial writes */ /* don't allow partial writes */
if (*ppos != 0) if (*ppos != 0)
return 0; return 0;
if (count >= sizeof(dump_request_buf))
return -ENOSPC;
bytes_not_copied = copy_from_user(dump_request_buf, buffer, count);
if (bytes_not_copied < 0)
return bytes_not_copied;
if (bytes_not_copied > 0)
count -= bytes_not_copied;
dump_request_buf[count] = '\0';
/* decode the SEID given to be dumped */ /* decode the SEID given to be dumped */
ret = kstrtol(dump_request_buf, 0, &seid); ret = kstrtol_from_user(buffer, count, 0, &seid);
if (ret < 0) {
dev_info(&pf->pdev->dev, "bad seid value '%s'\n", if (ret) {
dump_request_buf); dev_info(&pf->pdev->dev, "bad seid value\n");
} else if (seid == 0) { } else if (seid == 0) {
seid_found = true; seid_found = true;
@ -245,26 +234,33 @@ static ssize_t i40e_dbg_dump_write(struct file *filp,
memcpy(p, vsi, len); memcpy(p, vsi, len);
p += len; p += len;
len = (sizeof(struct i40e_q_vector) if (vsi->num_q_vectors) {
* vsi->num_q_vectors); len = (sizeof(struct i40e_q_vector)
memcpy(p, vsi->q_vectors, len); * vsi->num_q_vectors);
p += len; memcpy(p, vsi->q_vectors, len);
len = (sizeof(struct i40e_ring) * vsi->num_queue_pairs);
memcpy(p, vsi->tx_rings, len);
p += len;
memcpy(p, vsi->rx_rings, len);
p += len;
for (i = 0; i < vsi->num_queue_pairs; i++) {
len = sizeof(struct i40e_tx_buffer);
memcpy(p, vsi->tx_rings[i]->tx_bi, len);
p += len; p += len;
} }
for (i = 0; i < vsi->num_queue_pairs; i++) {
len = sizeof(struct i40e_rx_buffer); if (vsi->num_queue_pairs) {
memcpy(p, vsi->rx_rings[i]->rx_bi, len); len = (sizeof(struct i40e_ring) *
vsi->num_queue_pairs);
memcpy(p, vsi->tx_rings, len);
p += len; p += len;
memcpy(p, vsi->rx_rings, len);
p += len;
}
if (vsi->tx_rings[0]) {
len = sizeof(struct i40e_tx_buffer);
for (i = 0; i < vsi->num_queue_pairs; i++) {
memcpy(p, vsi->tx_rings[i]->tx_bi, len);
p += len;
}
len = sizeof(struct i40e_rx_buffer);
for (i = 0; i < vsi->num_queue_pairs; i++) {
memcpy(p, vsi->rx_rings[i]->rx_bi, len);
p += len;
}
} }
/* macvlan filter list */ /* macvlan filter list */
@ -1023,11 +1019,11 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
size_t count, loff_t *ppos) size_t count, loff_t *ppos)
{ {
struct i40e_pf *pf = filp->private_data; struct i40e_pf *pf = filp->private_data;
char *cmd_buf, *cmd_buf_tmp;
int bytes_not_copied; int bytes_not_copied;
struct i40e_vsi *vsi; struct i40e_vsi *vsi;
u8 *print_buf_start; u8 *print_buf_start;
u8 *print_buf; u8 *print_buf;
char *cmd_buf;
int vsi_seid; int vsi_seid;
int veb_seid; int veb_seid;
int cnt; int cnt;
@ -1046,6 +1042,12 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
count -= bytes_not_copied; count -= bytes_not_copied;
cmd_buf[count] = '\0'; cmd_buf[count] = '\0';
cmd_buf_tmp = strchr(cmd_buf, '\n');
if (cmd_buf_tmp) {
*cmd_buf_tmp = '\0';
count = cmd_buf_tmp - cmd_buf + 1;
}
print_buf_start = kzalloc(I40E_MAX_DEBUG_OUT_BUFFER, GFP_KERNEL); print_buf_start = kzalloc(I40E_MAX_DEBUG_OUT_BUFFER, GFP_KERNEL);
if (!print_buf_start) if (!print_buf_start)
goto command_write_done; goto command_write_done;
@ -1152,9 +1154,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
i40e_veb_release(pf->veb[i]); i40e_veb_release(pf->veb[i]);
} else if (strncmp(cmd_buf, "add macaddr", 11) == 0) { } else if (strncmp(cmd_buf, "add macaddr", 11) == 0) {
u8 ma[6];
int vlan = 0;
struct i40e_mac_filter *f; struct i40e_mac_filter *f;
int vlan = 0;
u8 ma[6];
int ret; int ret;
cnt = sscanf(&cmd_buf[11], cnt = sscanf(&cmd_buf[11],
@ -1190,8 +1192,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
ma, vlan, vsi_seid, f, ret); ma, vlan, vsi_seid, f, ret);
} else if (strncmp(cmd_buf, "del macaddr", 11) == 0) { } else if (strncmp(cmd_buf, "del macaddr", 11) == 0) {
u8 ma[6];
int vlan = 0; int vlan = 0;
u8 ma[6];
int ret; int ret;
cnt = sscanf(&cmd_buf[11], cnt = sscanf(&cmd_buf[11],
@ -1227,9 +1229,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
ma, vlan, vsi_seid, ret); ma, vlan, vsi_seid, ret);
} else if (strncmp(cmd_buf, "add pvid", 8) == 0) { } else if (strncmp(cmd_buf, "add pvid", 8) == 0) {
int v;
u16 vid;
i40e_status ret; i40e_status ret;
u16 vid;
int v;
cnt = sscanf(&cmd_buf[8], "%i %u", &vsi_seid, &v); cnt = sscanf(&cmd_buf[8], "%i %u", &vsi_seid, &v);
if (cnt != 2) { if (cnt != 2) {
@ -1540,10 +1542,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
} else if ((strncmp(cmd_buf, "add fd_filter", 13) == 0) || } else if ((strncmp(cmd_buf, "add fd_filter", 13) == 0) ||
(strncmp(cmd_buf, "rem fd_filter", 13) == 0)) { (strncmp(cmd_buf, "rem fd_filter", 13) == 0)) {
struct i40e_fdir_data fd_data; struct i40e_fdir_data fd_data;
int ret;
u16 packet_len, i, j = 0; u16 packet_len, i, j = 0;
char *asc_packet; char *asc_packet;
bool add = false; bool add = false;
int ret;
asc_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP, asc_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_LOOKUP,
GFP_KERNEL); GFP_KERNEL);
@ -1631,9 +1633,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
} }
} else if (strncmp(&cmd_buf[5], } else if (strncmp(&cmd_buf[5],
"get local", 9) == 0) { "get local", 9) == 0) {
u16 llen, rlen;
int ret, i; int ret, i;
u8 *buff; u8 *buff;
u16 llen, rlen;
buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL); buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
if (!buff) if (!buff)
goto command_write_done; goto command_write_done;
@ -1664,9 +1666,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
kfree(buff); kfree(buff);
buff = NULL; buff = NULL;
} else if (strncmp(&cmd_buf[5], "get remote", 10) == 0) { } else if (strncmp(&cmd_buf[5], "get remote", 10) == 0) {
u16 llen, rlen;
int ret, i; int ret, i;
u8 *buff; u8 *buff;
u16 llen, rlen;
buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL); buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
if (!buff) if (!buff)
goto command_write_done; goto command_write_done;
@ -1742,11 +1744,13 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
goto command_write_done; goto command_write_done;
} }
/* Read at least 512 words */ /* set the max length */
if (buffer_len == 0) buffer_len = min_t(u16, buffer_len, I40E_MAX_AQ_BUF_SIZE/2);
buffer_len = 512;
bytes = 2 * buffer_len; bytes = 2 * buffer_len;
/* read at least 1k bytes, no more than 4kB */
bytes = clamp(bytes, (u16)1024, (u16)I40E_MAX_AQ_BUF_SIZE);
buff = kzalloc(bytes, GFP_KERNEL); buff = kzalloc(bytes, GFP_KERNEL);
if (!buff) if (!buff)
goto command_write_done; goto command_write_done;
@ -1898,6 +1902,7 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
struct i40e_pf *pf = filp->private_data; struct i40e_pf *pf = filp->private_data;
int bytes_not_copied; int bytes_not_copied;
struct i40e_vsi *vsi; struct i40e_vsi *vsi;
char *buf_tmp;
int vsi_seid; int vsi_seid;
int i, cnt; int i, cnt;
@ -1916,6 +1921,12 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
count -= bytes_not_copied; count -= bytes_not_copied;
i40e_dbg_netdev_ops_buf[count] = '\0'; i40e_dbg_netdev_ops_buf[count] = '\0';
buf_tmp = strchr(i40e_dbg_netdev_ops_buf, '\n');
if (buf_tmp) {
*buf_tmp = '\0';
count = buf_tmp - i40e_dbg_netdev_ops_buf + 1;
}
if (strncmp(i40e_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) { if (strncmp(i40e_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) {
cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid); cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid);
if (cnt != 1) { if (cnt != 1) {
@ -2019,21 +2030,35 @@ static const struct file_operations i40e_dbg_netdev_ops_fops = {
**/ **/
void i40e_dbg_pf_init(struct i40e_pf *pf) void i40e_dbg_pf_init(struct i40e_pf *pf)
{ {
struct dentry *pfile __attribute__((unused)); struct dentry *pfile;
const char *name = pci_name(pf->pdev); const char *name = pci_name(pf->pdev);
const struct device *dev = &pf->pdev->dev;
pf->i40e_dbg_pf = debugfs_create_dir(name, i40e_dbg_root); pf->i40e_dbg_pf = debugfs_create_dir(name, i40e_dbg_root);
if (pf->i40e_dbg_pf) { if (!pf->i40e_dbg_pf)
pfile = debugfs_create_file("command", 0600, pf->i40e_dbg_pf, return;
pf, &i40e_dbg_command_fops);
pfile = debugfs_create_file("dump", 0600, pf->i40e_dbg_pf, pf, pfile = debugfs_create_file("command", 0600, pf->i40e_dbg_pf, pf,
&i40e_dbg_dump_fops); &i40e_dbg_command_fops);
pfile = debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, if (!pfile)
pf, &i40e_dbg_netdev_ops_fops); goto create_failed;
} else {
dev_info(&pf->pdev->dev, pfile = debugfs_create_file("dump", 0600, pf->i40e_dbg_pf, pf,
"debugfs entry for %s failed\n", name); &i40e_dbg_dump_fops);
} if (!pfile)
goto create_failed;
pfile = debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, pf,
&i40e_dbg_netdev_ops_fops);
if (!pfile)
goto create_failed;
return;
create_failed:
dev_info(dev, "debugfs dir/file for %s failed\n", name);
debugfs_remove_recursive(pf->i40e_dbg_pf);
return;
} }
/** /**

View File

@ -36,7 +36,7 @@ static const char i40e_driver_string[] =
#define DRV_VERSION_MAJOR 0 #define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 3 #define DRV_VERSION_MINOR 3
#define DRV_VERSION_BUILD 10 #define DRV_VERSION_BUILD 11
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN __stringify(DRV_VERSION_BUILD) DRV_KERN
@ -2174,8 +2174,8 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
/* Now associate this queue with this PCI function */ /* Now associate this queue with this PCI function */
qtx_ctl = I40E_QTX_CTL_PF_QUEUE; qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
qtx_ctl |= ((hw->hmc.hmc_fn_id << I40E_QTX_CTL_PF_INDX_SHIFT) qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
& I40E_QTX_CTL_PF_INDX_MASK); I40E_QTX_CTL_PF_INDX_MASK);
wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl); wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
i40e_flush(hw); i40e_flush(hw);
@ -2532,7 +2532,7 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
* i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0 * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
* @pf: board private structure * @pf: board private structure
**/ **/
static void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf) void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
{ {
struct i40e_hw *hw = &pf->hw; struct i40e_hw *hw = &pf->hw;
u32 val; u32 val;
@ -2560,7 +2560,7 @@ void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
I40E_PFINT_DYN_CTLN_CLEARPBA_MASK | I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
(I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT); (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val); wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
i40e_flush(hw); /* skip the flush */
} }
/** /**
@ -2709,6 +2709,7 @@ static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
i40e_irq_dynamic_enable_icr0(pf); i40e_irq_dynamic_enable_icr0(pf);
} }
i40e_flush(&pf->hw);
return 0; return 0;
} }
@ -2741,14 +2742,14 @@ static irqreturn_t i40e_intr(int irq, void *data)
icr0 = rd32(hw, I40E_PFINT_ICR0); icr0 = rd32(hw, I40E_PFINT_ICR0);
/* if sharing a legacy IRQ, we might get called w/o an intr pending */
if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
return IRQ_NONE;
val = rd32(hw, I40E_PFINT_DYN_CTL0); val = rd32(hw, I40E_PFINT_DYN_CTL0);
val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK; val = val | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
wr32(hw, I40E_PFINT_DYN_CTL0, val); wr32(hw, I40E_PFINT_DYN_CTL0, val);
/* if sharing a legacy IRQ, we might get called w/o an intr pending */
if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
return IRQ_NONE;
ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA); ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);
/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */ /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
@ -2762,7 +2763,6 @@ static irqreturn_t i40e_intr(int irq, void *data)
qval = rd32(hw, I40E_QINT_TQCTL(0)); qval = rd32(hw, I40E_QINT_TQCTL(0));
qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK; qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
wr32(hw, I40E_QINT_TQCTL(0), qval); wr32(hw, I40E_QINT_TQCTL(0), qval);
i40e_flush(hw);
if (!test_bit(__I40E_DOWN, &pf->state)) if (!test_bit(__I40E_DOWN, &pf->state))
napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi); napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi);
@ -2824,7 +2824,6 @@ static irqreturn_t i40e_intr(int irq, void *data)
/* re-enable interrupt causes */ /* re-enable interrupt causes */
wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask); wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
i40e_flush(hw);
if (!test_bit(__I40E_DOWN, &pf->state)) { if (!test_bit(__I40E_DOWN, &pf->state)) {
i40e_service_event_schedule(pf); i40e_service_event_schedule(pf);
i40e_irq_dynamic_enable_icr0(pf); i40e_irq_dynamic_enable_icr0(pf);
@ -4614,7 +4613,8 @@ static void i40e_fdir_setup(struct i40e_pf *pf)
bool new_vsi = false; bool new_vsi = false;
int err, i; int err, i;
if (!(pf->flags & (I40E_FLAG_FDIR_ENABLED|I40E_FLAG_FDIR_ATR_ENABLED))) if (!(pf->flags & (I40E_FLAG_FDIR_ENABLED |
I40E_FLAG_FDIR_ATR_ENABLED)))
return; return;
pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE; pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
@ -5159,11 +5159,12 @@ static s32 i40e_vsi_clear_rings(struct i40e_vsi *vsi)
{ {
int i; int i;
for (i = 0; i < vsi->alloc_queue_pairs; i++) { if (vsi->tx_rings[0])
kfree_rcu(vsi->tx_rings[i], rcu); for (i = 0; i < vsi->alloc_queue_pairs; i++) {
vsi->tx_rings[i] = NULL; kfree_rcu(vsi->tx_rings[i], rcu);
vsi->rx_rings[i] = NULL; vsi->tx_rings[i] = NULL;
} vsi->rx_rings[i] = NULL;
}
return 0; return 0;
} }
@ -5433,7 +5434,8 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
if (pf->flags & I40E_FLAG_MSIX_ENABLED) { if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
err = i40e_init_msix(pf); err = i40e_init_msix(pf);
if (err) { if (err) {
pf->flags &= ~(I40E_FLAG_RSS_ENABLED | pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
I40E_FLAG_RSS_ENABLED |
I40E_FLAG_MQ_ENABLED | I40E_FLAG_MQ_ENABLED |
I40E_FLAG_DCB_ENABLED | I40E_FLAG_DCB_ENABLED |
I40E_FLAG_SRIOV_ENABLED | I40E_FLAG_SRIOV_ENABLED |
@ -5448,14 +5450,17 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) && if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
(pf->flags & I40E_FLAG_MSI_ENABLED)) { (pf->flags & I40E_FLAG_MSI_ENABLED)) {
dev_info(&pf->pdev->dev, "MSIX not available, trying MSI\n");
err = pci_enable_msi(pf->pdev); err = pci_enable_msi(pf->pdev);
if (err) { if (err) {
dev_info(&pf->pdev->dev, dev_info(&pf->pdev->dev, "MSI init failed - %d\n", err);
"MSI init failed (%d), trying legacy.\n", err);
pf->flags &= ~I40E_FLAG_MSI_ENABLED; pf->flags &= ~I40E_FLAG_MSI_ENABLED;
} }
} }
if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
dev_info(&pf->pdev->dev, "MSIX and MSI not available, falling back to Legacy IRQ\n");
/* track first vector for misc interrupts */ /* track first vector for misc interrupts */
err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1); err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1);
} }
@ -6108,8 +6113,9 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
goto vector_setup_out; goto vector_setup_out;
} }
vsi->base_vector = i40e_get_lump(pf, pf->irq_pile, if (vsi->num_q_vectors)
vsi->num_q_vectors, vsi->idx); vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
vsi->num_q_vectors, vsi->idx);
if (vsi->base_vector < 0) { if (vsi->base_vector < 0) {
dev_info(&pf->pdev->dev, dev_info(&pf->pdev->dev,
"failed to get q tracking for VSI %d, err=%d\n", "failed to get q tracking for VSI %d, err=%d\n",

View File

@ -37,6 +37,7 @@ static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT)); ((u64)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
} }
#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
/** /**
* i40e_program_fdir_filter - Program a Flow Director filter * i40e_program_fdir_filter - Program a Flow Director filter
* @fdir_input: Packet data that will be filter parameters * @fdir_input: Packet data that will be filter parameters
@ -50,6 +51,7 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
struct i40e_tx_buffer *tx_buf; struct i40e_tx_buffer *tx_buf;
struct i40e_tx_desc *tx_desc; struct i40e_tx_desc *tx_desc;
struct i40e_ring *tx_ring; struct i40e_ring *tx_ring;
unsigned int fpt, dcc;
struct i40e_vsi *vsi; struct i40e_vsi *vsi;
struct device *dev; struct device *dev;
dma_addr_t dma; dma_addr_t dma;
@ -68,7 +70,7 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
dev = tx_ring->dev; dev = tx_ring->dev;
dma = dma_map_single(dev, fdir_data->raw_packet, dma = dma_map_single(dev, fdir_data->raw_packet,
I40E_FDIR_MAX_RAW_PACKET_LOOKUP, DMA_TO_DEVICE); I40E_FDIR_MAX_RAW_PACKET_LOOKUP, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma)) if (dma_mapping_error(dev, dma))
goto dma_fail; goto dma_fail;
@ -77,74 +79,61 @@ int i40e_program_fdir_filter(struct i40e_fdir_data *fdir_data,
fdir_desc = I40E_TX_FDIRDESC(tx_ring, i); fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
tx_buf = &tx_ring->tx_bi[i]; tx_buf = &tx_ring->tx_bi[i];
i++; tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;
tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32((fdir_data->q_index fpt = (fdir_data->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
<< I40E_TXD_FLTR_QW0_QINDEX_SHIFT) I40E_TXD_FLTR_QW0_QINDEX_MASK;
& I40E_TXD_FLTR_QW0_QINDEX_MASK);
fdir_desc->qindex_flex_ptype_vsi |= cpu_to_le32((fdir_data->flex_off fpt |= (fdir_data->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) &
<< I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT) I40E_TXD_FLTR_QW0_FLEXOFF_MASK;
& I40E_TXD_FLTR_QW0_FLEXOFF_MASK);
fdir_desc->qindex_flex_ptype_vsi |= cpu_to_le32((fdir_data->pctype fpt |= (fdir_data->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) &
<< I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) I40E_TXD_FLTR_QW0_PCTYPE_MASK;
& I40E_TXD_FLTR_QW0_PCTYPE_MASK);
/* Use LAN VSI Id if not programmed by user */ /* Use LAN VSI Id if not programmed by user */
if (fdir_data->dest_vsi == 0) if (fdir_data->dest_vsi == 0)
fdir_desc->qindex_flex_ptype_vsi |= fpt |= (pf->vsi[pf->lan_vsi]->id) <<
cpu_to_le32((pf->vsi[pf->lan_vsi]->id) I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
<< I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT);
else else
fdir_desc->qindex_flex_ptype_vsi |= fpt |= ((u32)fdir_data->dest_vsi <<
cpu_to_le32((fdir_data->dest_vsi I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
<< I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) I40E_TXD_FLTR_QW0_DEST_VSI_MASK;
& I40E_TXD_FLTR_QW0_DEST_VSI_MASK);
fdir_desc->dtype_cmd_cntindex = fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt);
cpu_to_le32(I40E_TX_DESC_DTYPE_FILTER_PROG);
dcc = I40E_TX_DESC_DTYPE_FILTER_PROG;
if (add) if (add)
fdir_desc->dtype_cmd_cntindex |= cpu_to_le32( dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE I40E_TXD_FLTR_QW1_PCMD_SHIFT;
<< I40E_TXD_FLTR_QW1_PCMD_SHIFT);
else else
fdir_desc->dtype_cmd_cntindex |= cpu_to_le32( dcc |= I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE I40E_TXD_FLTR_QW1_PCMD_SHIFT;
<< I40E_TXD_FLTR_QW1_PCMD_SHIFT);
fdir_desc->dtype_cmd_cntindex |= cpu_to_le32((fdir_data->dest_ctl dcc |= (fdir_data->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT) &
<< I40E_TXD_FLTR_QW1_DEST_SHIFT) I40E_TXD_FLTR_QW1_DEST_MASK;
& I40E_TXD_FLTR_QW1_DEST_MASK);
fdir_desc->dtype_cmd_cntindex |= cpu_to_le32( dcc |= (fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) &
(fdir_data->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT) I40E_TXD_FLTR_QW1_FD_STATUS_MASK;
& I40E_TXD_FLTR_QW1_FD_STATUS_MASK);
if (fdir_data->cnt_index != 0) { if (fdir_data->cnt_index != 0) {
fdir_desc->dtype_cmd_cntindex |= dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
cpu_to_le32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK); dcc |= ((u32)fdir_data->cnt_index <<
fdir_desc->dtype_cmd_cntindex |= I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
cpu_to_le32((fdir_data->cnt_index I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
<< I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
& I40E_TXD_FLTR_QW1_CNTINDEX_MASK);
} }
fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id); fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);
/* Now program a dummy descriptor */ /* Now program a dummy descriptor */
i = tx_ring->next_to_use; i = tx_ring->next_to_use;
tx_desc = I40E_TX_DESC(tx_ring, i); tx_desc = I40E_TX_DESC(tx_ring, i);
i++; tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;
tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
tx_desc->buffer_addr = cpu_to_le64(dma); tx_desc->buffer_addr = cpu_to_le64(dma);
td_cmd = I40E_TX_DESC_CMD_EOP | td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
I40E_TX_DESC_CMD_RS |
I40E_TX_DESC_CMD_DUMMY;
tx_desc->cmd_type_offset_bsz = tx_desc->cmd_type_offset_bsz =
build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_LOOKUP, 0); build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_LOOKUP, 0);
@ -559,8 +548,6 @@ static void i40e_update_dynamic_itr(struct i40e_q_vector *q_vector)
i40e_set_new_dynamic_itr(&q_vector->tx); i40e_set_new_dynamic_itr(&q_vector->tx);
if (old_itr != q_vector->tx.itr) if (old_itr != q_vector->tx.itr)
wr32(hw, reg_addr, q_vector->tx.itr); wr32(hw, reg_addr, q_vector->tx.itr);
i40e_flush(hw);
} }
/** /**
@ -1155,7 +1142,8 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
qval = rd32(hw, I40E_QINT_TQCTL(0)); qval = rd32(hw, I40E_QINT_TQCTL(0));
qval |= I40E_QINT_TQCTL_CAUSE_ENA_MASK; qval |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
wr32(hw, I40E_QINT_TQCTL(0), qval); wr32(hw, I40E_QINT_TQCTL(0), qval);
i40e_flush(hw);
i40e_irq_dynamic_enable_icr0(vsi->back);
} }
} }
@ -1256,7 +1244,6 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd); fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
} }
#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
/** /**
* i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
* @skb: send buffer * @skb: send buffer

View File

@ -251,7 +251,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_idx,
reg_idx = I40E_VPINT_LNKLST0(vf->vf_id); reg_idx = I40E_VPINT_LNKLST0(vf->vf_id);
else else
reg_idx = I40E_VPINT_LNKLSTN( reg_idx = I40E_VPINT_LNKLSTN(
((pf->hw.func_caps.num_msix_vectors_vf - 1) (pf->hw.func_caps.num_msix_vectors_vf
* vf->vf_id) + (vector_id - 1)); * vf->vf_id) + (vector_id - 1));
if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) { if (vecmap->rxq_map == 0 && vecmap->txq_map == 0) {
@ -383,7 +383,7 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
/* associate this queue with the PCI VF function */ /* associate this queue with the PCI VF function */
qtx_ctl = I40E_QTX_CTL_VF_QUEUE; qtx_ctl = I40E_QTX_CTL_VF_QUEUE;
qtx_ctl |= ((hw->hmc.hmc_fn_id << I40E_QTX_CTL_PF_INDX_SHIFT) qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT)
& I40E_QTX_CTL_PF_INDX_MASK); & I40E_QTX_CTL_PF_INDX_MASK);
qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id) qtx_ctl |= (((vf->vf_id + hw->func_caps.vf_base_id)
<< I40E_QTX_CTL_VFVM_INDX_SHIFT) << I40E_QTX_CTL_VFVM_INDX_SHIFT)