Add support for the latest 1G/10G Chelsio adapter, T3.

This driver is required by the Chelsio T3 RDMA driver posted by
Steve Wise.

Signed-off-by: Divy Le Ray <divy@chelsio.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Divy Le Ray, 2007-01-18 22:04:14 -05:00, committed by Jeff Garzik
parent 0bf94faf64
commit 4d22de3e6c
25 changed files with 17328 additions and 0 deletions

drivers/net/Kconfig

@@ -2389,6 +2389,24 @@ config CHELSIO_T1_NAPI
NAPI is a driver API designed to reduce CPU and interrupt load
when the driver is receiving lots of packets from the card.
config CHELSIO_T3
tristate "Chelsio Communications T3 10Gb Ethernet support"
depends on PCI
help
This driver supports Chelsio T3-based gigabit and 10Gb Ethernet
adapters.
For general information about Chelsio and our products, visit
our website at <http://www.chelsio.com>.
For customer support, please visit our customer support page at
<http://www.chelsio.com/support.htm>.
Please send feedback to <linux-bugs@chelsio.com>.
To compile this driver as a module, choose M here: the module
will be called cxgb3.
config EHEA
tristate "eHEA Ethernet support"
depends on IBMEBUS

drivers/net/Makefile

@@ -6,6 +6,7 @@ obj-$(CONFIG_E1000) += e1000/
obj-$(CONFIG_IBM_EMAC) += ibm_emac/
obj-$(CONFIG_IXGB) += ixgb/
obj-$(CONFIG_CHELSIO_T1) += chelsio/
obj-$(CONFIG_CHELSIO_T3) += cxgb3/
obj-$(CONFIG_EHEA) += ehea/
obj-$(CONFIG_BONDING) += bonding/
obj-$(CONFIG_GIANFAR) += gianfar_driver.o

drivers/net/cxgb3/Makefile Normal file

@@ -0,0 +1,8 @@
#
# Chelsio T3 driver
#
obj-$(CONFIG_CHELSIO_T3) += cxgb3.o
cxgb3-objs := cxgb3_main.o ael1002.o vsc8211.o t3_hw.o mc5.o \
xgmac.o sge.o l2t.o cxgb3_offload.o

drivers/net/cxgb3/adapter.h Normal file

@@ -0,0 +1,255 @@
/*
* This file is part of the Chelsio T3 Ethernet driver for Linux.
*
* Copyright (C) 2003-2006 Chelsio Communications. All rights reserved.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
* release for licensing terms and conditions.
*/
/* This file should not be included directly. Include common.h instead. */
#ifndef __T3_ADAPTER_H__
#define __T3_ADAPTER_H__
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/cache.h>
#include "t3cdev.h"
#include <asm/semaphore.h>
#include <asm/bitops.h>
#include <asm/io.h>
typedef irqreturn_t (*intr_handler_t)(int, void *);
struct vlan_group;
struct port_info {
struct vlan_group *vlan_grp;
const struct port_type_info *port_type;
u8 port_id;
u8 rx_csum_offload;
u8 nqsets;
u8 first_qset;
struct cphy phy;
struct cmac mac;
struct link_config link_config;
struct net_device_stats netstats;
int activity;
};
enum { /* adapter flags */
FULL_INIT_DONE = (1 << 0),
USING_MSI = (1 << 1),
USING_MSIX = (1 << 2),
};
struct rx_desc;
struct rx_sw_desc;
struct sge_fl { /* SGE per free-buffer list state */
unsigned int buf_size; /* size of each Rx buffer */
unsigned int credits; /* # of available Rx buffers */
unsigned int size; /* capacity of free list */
unsigned int cidx; /* consumer index */
unsigned int pidx; /* producer index */
unsigned int gen; /* free list generation */
struct rx_desc *desc; /* address of HW Rx descriptor ring */
struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */
dma_addr_t phys_addr; /* physical address of HW ring start */
unsigned int cntxt_id; /* SGE context id for the free list */
unsigned long empty; /* # of times queue ran out of buffers */
};
/*
* Bundle size for grouping offload RX packets for delivery to the stack.
* Don't make this too big as we do prefetch on each packet in a bundle.
*/
# define RX_BUNDLE_SIZE 8
struct rsp_desc;
struct sge_rspq { /* state for an SGE response queue */
unsigned int credits; /* # of pending response credits */
unsigned int size; /* capacity of response queue */
unsigned int cidx; /* consumer index */
unsigned int gen; /* current generation bit */
unsigned int polling; /* is the queue serviced through NAPI? */
unsigned int holdoff_tmr; /* interrupt holdoff timer in 100ns */
unsigned int next_holdoff; /* holdoff time for next interrupt */
struct rsp_desc *desc; /* address of HW response ring */
dma_addr_t phys_addr; /* physical address of the ring */
unsigned int cntxt_id; /* SGE context id for the response q */
spinlock_t lock; /* guards response processing */
struct sk_buff *rx_head; /* offload packet receive queue head */
struct sk_buff *rx_tail; /* offload packet receive queue tail */
unsigned long offload_pkts;
unsigned long offload_bundles;
unsigned long eth_pkts; /* # of ethernet packets */
unsigned long pure_rsps; /* # of pure (non-data) responses */
unsigned long imm_data; /* responses with immediate data */
unsigned long rx_drops; /* # of packets dropped due to no mem */
unsigned long async_notif; /* # of asynchronous notification events */
unsigned long empty; /* # of times queue ran out of credits */
unsigned long nomem; /* # of responses deferred due to no mem */
unsigned long unhandled_irqs; /* # of spurious intrs */
};
struct tx_desc;
struct tx_sw_desc;
struct sge_txq { /* state for an SGE Tx queue */
unsigned long flags; /* HW DMA fetch status */
unsigned int in_use; /* # of in-use Tx descriptors */
unsigned int size; /* # of descriptors */
unsigned int processed; /* total # of descs HW has processed */
unsigned int cleaned; /* total # of descs SW has reclaimed */
unsigned int stop_thres; /* SW TX queue suspend threshold */
unsigned int cidx; /* consumer index */
unsigned int pidx; /* producer index */
unsigned int gen; /* current value of generation bit */
unsigned int unacked; /* Tx descriptors used since last COMPL */
struct tx_desc *desc; /* address of HW Tx descriptor ring */
struct tx_sw_desc *sdesc; /* address of SW Tx descriptor ring */
spinlock_t lock; /* guards enqueueing of new packets */
unsigned int token; /* WR token */
dma_addr_t phys_addr; /* physical address of the ring */
struct sk_buff_head sendq; /* List of backpressured offload packets */
struct tasklet_struct qresume_tsk; /* restarts the queue */
unsigned int cntxt_id; /* SGE context id for the Tx q */
unsigned long stops; /* # of times q has been stopped */
unsigned long restarts; /* # of queue restarts */
};
enum { /* per port SGE statistics */
SGE_PSTAT_TSO, /* # of TSO requests */
SGE_PSTAT_RX_CSUM_GOOD, /* # of successful RX csum offloads */
SGE_PSTAT_TX_CSUM, /* # of TX checksum offloads */
SGE_PSTAT_VLANEX, /* # of VLAN tag extractions */
SGE_PSTAT_VLANINS, /* # of VLAN tag insertions */
SGE_PSTAT_MAX /* must be last */
};
struct sge_qset { /* an SGE queue set */
struct sge_rspq rspq;
struct sge_fl fl[SGE_RXQ_PER_SET];
struct sge_txq txq[SGE_TXQ_PER_SET];
struct net_device *netdev; /* associated net device */
unsigned long txq_stopped; /* which Tx queues are stopped */
struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
unsigned long port_stats[SGE_PSTAT_MAX];
} ____cacheline_aligned;
struct sge {
struct sge_qset qs[SGE_QSETS];
spinlock_t reg_lock; /* guards non-atomic SGE registers (eg context) */
};
struct adapter {
struct t3cdev tdev;
struct list_head adapter_list;
void __iomem *regs;
struct pci_dev *pdev;
unsigned long registered_device_map;
unsigned long open_device_map;
unsigned long flags;
const char *name;
int msg_enable;
unsigned int mmio_len;
struct adapter_params params;
unsigned int slow_intr_mask;
unsigned long irq_stats[IRQ_NUM_STATS];
struct {
unsigned short vec;
char desc[22];
} msix_info[SGE_QSETS + 1];
/* T3 modules */
struct sge sge;
struct mc7 pmrx;
struct mc7 pmtx;
struct mc7 cm;
struct mc5 mc5;
struct net_device *port[MAX_NPORTS];
unsigned int check_task_cnt;
struct delayed_work adap_check_task;
struct work_struct ext_intr_handler_task;
/*
* Dummy netdevices are needed when using multiple receive queues with
* NAPI as each netdevice can service only one queue.
*/
struct net_device *dummy_netdev[SGE_QSETS - 1];
struct dentry *debugfs_root;
struct mutex mdio_lock;
spinlock_t stats_lock;
spinlock_t work_lock;
};
static inline u32 t3_read_reg(struct adapter *adapter, u32 reg_addr)
{
u32 val = readl(adapter->regs + reg_addr);
CH_DBG(adapter, MMIO, "read register 0x%x value 0x%x\n", reg_addr, val);
return val;
}
static inline void t3_write_reg(struct adapter *adapter, u32 reg_addr, u32 val)
{
CH_DBG(adapter, MMIO, "setting register 0x%x to 0x%x\n", reg_addr, val);
writel(val, adapter->regs + reg_addr);
}
static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
{
return netdev_priv(adap->port[idx]);
}
/*
* We use the spare atalk_ptr to map a net device to its SGE queue set.
* This is a macro so it can be used as an l-value.
*/
#define dev2qset(netdev) ((netdev)->atalk_ptr)
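/*
* Example (editor's illustration, not part of the original commit): because
* dev2qset() expands to a struct member, it works on either side of an
* assignment:
*
*	dev2qset(netdev) = qs;		store the queue set pointer
*	qs = dev2qset(netdev);		and read it back
*/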
#define OFFLOAD_DEVMAP_BIT 15
#define tdev2adap(d) container_of(d, struct adapter, tdev)
static inline int offload_running(struct adapter *adapter)
{
return test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
}
int t3_offload_tx(struct t3cdev *tdev, struct sk_buff *skb);
void t3_os_ext_intr_handler(struct adapter *adapter);
void t3_os_link_changed(struct adapter *adapter, int port_id, int link_status,
int speed, int duplex, int fc);
void t3_sge_start(struct adapter *adap);
void t3_sge_stop(struct adapter *adap);
void t3_free_sge_resources(struct adapter *adap);
void t3_sge_err_intr_handler(struct adapter *adapter);
intr_handler_t t3_intr_handler(struct adapter *adap, int polling);
int t3_eth_xmit(struct sk_buff *skb, struct net_device *dev);
void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
int irq_vec_idx, const struct qset_params *p,
int ntxq, struct net_device *netdev);
int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
unsigned char *data);
irqreturn_t t3_sge_intr_msix(int irq, void *cookie);
#endif /* __T3_ADAPTER_H__ */

drivers/net/cxgb3/ael1002.c Normal file

@@ -0,0 +1,231 @@
/*
* This file is part of the Chelsio T3 Ethernet driver.
*
* Copyright (C) 2005-2006 Chelsio Communications. All rights reserved.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
* release for licensing terms and conditions.
*/
#include "common.h"
#include "regs.h"
enum {
AEL100X_TX_DISABLE = 9,
AEL100X_TX_CONFIG1 = 0xc002,
AEL1002_PWR_DOWN_HI = 0xc011,
AEL1002_PWR_DOWN_LO = 0xc012,
AEL1002_XFI_EQL = 0xc015,
AEL1002_LB_EN = 0xc017,
LASI_CTRL = 0x9002,
LASI_STAT = 0x9005
};
static void ael100x_txon(struct cphy *phy)
{
int tx_on_gpio = phy->addr == 0 ? F_GPIO7_OUT_VAL : F_GPIO2_OUT_VAL;
msleep(100);
t3_set_reg_field(phy->adapter, A_T3DBG_GPIO_EN, 0, tx_on_gpio);
msleep(30);
}
static int ael1002_power_down(struct cphy *phy, int enable)
{
int err;
err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL100X_TX_DISABLE, !!enable);
if (!err)
err = t3_mdio_change_bits(phy, MDIO_DEV_PMA_PMD, MII_BMCR,
BMCR_PDOWN, enable ? BMCR_PDOWN : 0);
return err;
}
static int ael1002_reset(struct cphy *phy, int wait)
{
int err;
if ((err = ael1002_power_down(phy, 0)) ||
(err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL100X_TX_CONFIG1, 1)) ||
(err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL1002_PWR_DOWN_HI, 0)) ||
(err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL1002_PWR_DOWN_LO, 0)) ||
(err = mdio_write(phy, MDIO_DEV_PMA_PMD, AEL1002_XFI_EQL, 0x18)) ||
(err = t3_mdio_change_bits(phy, MDIO_DEV_PMA_PMD, AEL1002_LB_EN,
0, 1 << 5)))
return err;
return 0;
}
static int ael1002_intr_noop(struct cphy *phy)
{
return 0;
}
static int ael100x_get_link_status(struct cphy *phy, int *link_ok,
int *speed, int *duplex, int *fc)
{
if (link_ok) {
unsigned int status;
int err = mdio_read(phy, MDIO_DEV_PMA_PMD, MII_BMSR, &status);
/*
* BMSR_LSTATUS is latch-low, so if it is 0 we need to read it
* once more to get the current link state.
*/
if (!err && !(status & BMSR_LSTATUS))
err = mdio_read(phy, MDIO_DEV_PMA_PMD, MII_BMSR,
&status);
if (err)
return err;
*link_ok = !!(status & BMSR_LSTATUS);
}
if (speed)
*speed = SPEED_10000;
if (duplex)
*duplex = DUPLEX_FULL;
return 0;
}
static struct cphy_ops ael1002_ops = {
.reset = ael1002_reset,
.intr_enable = ael1002_intr_noop,
.intr_disable = ael1002_intr_noop,
.intr_clear = ael1002_intr_noop,
.intr_handler = ael1002_intr_noop,
.get_link_status = ael100x_get_link_status,
.power_down = ael1002_power_down,
};
void t3_ael1002_phy_prep(struct cphy *phy, struct adapter *adapter,
int phy_addr, const struct mdio_ops *mdio_ops)
{
cphy_init(phy, adapter, phy_addr, &ael1002_ops, mdio_ops);
ael100x_txon(phy);
}
static int ael1006_reset(struct cphy *phy, int wait)
{
return t3_phy_reset(phy, MDIO_DEV_PMA_PMD, wait);
}
static int ael1006_intr_enable(struct cphy *phy)
{
return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 1);
}
static int ael1006_intr_disable(struct cphy *phy)
{
return mdio_write(phy, MDIO_DEV_PMA_PMD, LASI_CTRL, 0);
}
static int ael1006_intr_clear(struct cphy *phy)
{
u32 val;
return mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &val);
}
static int ael1006_intr_handler(struct cphy *phy)
{
unsigned int status;
int err = mdio_read(phy, MDIO_DEV_PMA_PMD, LASI_STAT, &status);
if (err)
return err;
return (status & 1) ? cphy_cause_link_change : 0;
}
static int ael1006_power_down(struct cphy *phy, int enable)
{
return t3_mdio_change_bits(phy, MDIO_DEV_PMA_PMD, MII_BMCR,
BMCR_PDOWN, enable ? BMCR_PDOWN : 0);
}
static struct cphy_ops ael1006_ops = {
.reset = ael1006_reset,
.intr_enable = ael1006_intr_enable,
.intr_disable = ael1006_intr_disable,
.intr_clear = ael1006_intr_clear,
.intr_handler = ael1006_intr_handler,
.get_link_status = ael100x_get_link_status,
.power_down = ael1006_power_down,
};
void t3_ael1006_phy_prep(struct cphy *phy, struct adapter *adapter,
int phy_addr, const struct mdio_ops *mdio_ops)
{
cphy_init(phy, adapter, phy_addr, &ael1006_ops, mdio_ops);
ael100x_txon(phy);
}
static struct cphy_ops qt2045_ops = {
.reset = ael1006_reset,
.intr_enable = ael1006_intr_enable,
.intr_disable = ael1006_intr_disable,
.intr_clear = ael1006_intr_clear,
.intr_handler = ael1006_intr_handler,
.get_link_status = ael100x_get_link_status,
.power_down = ael1006_power_down,
};
void t3_qt2045_phy_prep(struct cphy *phy, struct adapter *adapter,
int phy_addr, const struct mdio_ops *mdio_ops)
{
unsigned int stat;
cphy_init(phy, adapter, phy_addr, &qt2045_ops, mdio_ops);
/*
* Some cards where the PHY is supposed to be at address 0 actually
* have it at 1.
*/
if (!phy_addr && !mdio_read(phy, MDIO_DEV_PMA_PMD, MII_BMSR, &stat) &&
stat == 0xffff)
phy->addr = 1;
}
static int xaui_direct_reset(struct cphy *phy, int wait)
{
return 0;
}
static int xaui_direct_get_link_status(struct cphy *phy, int *link_ok,
int *speed, int *duplex, int *fc)
{
if (link_ok) {
unsigned int status;
status = t3_read_reg(phy->adapter,
XGM_REG(A_XGM_SERDES_STAT0, phy->addr));
*link_ok = !(status & F_LOWSIG0);
}
if (speed)
*speed = SPEED_10000;
if (duplex)
*duplex = DUPLEX_FULL;
return 0;
}
static int xaui_direct_power_down(struct cphy *phy, int enable)
{
return 0;
}
static struct cphy_ops xaui_direct_ops = {
.reset = xaui_direct_reset,
.intr_enable = ael1002_intr_noop,
.intr_disable = ael1002_intr_noop,
.intr_clear = ael1002_intr_noop,
.intr_handler = ael1002_intr_noop,
.get_link_status = xaui_direct_get_link_status,
.power_down = xaui_direct_power_down,
};
void t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
int phy_addr, const struct mdio_ops *mdio_ops)
{
cphy_init(phy, adapter, 1, &xaui_direct_ops, mdio_ops);
}

drivers/net/cxgb3/common.h Normal file

@@ -0,0 +1,709 @@
/*
* This file is part of the Chelsio T3 Ethernet driver.
*
* Copyright (C) 2005-2006 Chelsio Communications. All rights reserved.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
* release for licensing terms and conditions.
*/
#ifndef __CHELSIO_COMMON_H
#define __CHELSIO_COMMON_H
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include "version.h"
#define CH_ERR(adap, fmt, ...) dev_err(&adap->pdev->dev, fmt, ## __VA_ARGS__)
#define CH_WARN(adap, fmt, ...) dev_warn(&adap->pdev->dev, fmt, ## __VA_ARGS__)
#define CH_ALERT(adap, fmt, ...) \
dev_printk(KERN_ALERT, &adap->pdev->dev, fmt, ## __VA_ARGS__)
/*
* More powerful macro that selectively prints messages based on msg_enable.
* For info and debugging messages.
*/
#define CH_MSG(adapter, level, category, fmt, ...) do { \
if ((adapter)->msg_enable & NETIF_MSG_##category) \
dev_printk(KERN_##level, &adapter->pdev->dev, fmt, \
## __VA_ARGS__); \
} while (0)
#ifdef DEBUG
# define CH_DBG(adapter, category, fmt, ...) \
CH_MSG(adapter, DEBUG, category, fmt, ## __VA_ARGS__)
#else
# define CH_DBG(adapter, category, fmt, ...)
#endif
/* Additional NETIF_MSG_* categories */
#define NETIF_MSG_MMIO 0x8000000
struct t3_rx_mode {
struct net_device *dev;
struct dev_mc_list *mclist;
unsigned int idx;
};
static inline void init_rx_mode(struct t3_rx_mode *p, struct net_device *dev,
struct dev_mc_list *mclist)
{
p->dev = dev;
p->mclist = mclist;
p->idx = 0;
}
static inline u8 *t3_get_next_mcaddr(struct t3_rx_mode *rm)
{
u8 *addr = NULL;
if (rm->mclist && rm->idx < rm->dev->mc_count) {
addr = rm->mclist->dmi_addr;
rm->mclist = rm->mclist->next;
rm->idx++;
}
return addr;
}
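/*
* Typical iteration (editor's sketch; mac_add_filter() is a hypothetical
* callee and dev->mc_list is the 2.6-era multicast list head):
*
*	struct t3_rx_mode rm;
*	u8 *addr;
*
*	init_rx_mode(&rm, dev, dev->mc_list);
*	while ((addr = t3_get_next_mcaddr(&rm)) != NULL)
*		mac_add_filter(mac, addr);
*/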
enum {
MAX_NPORTS = 2, /* max # of ports */
MAX_FRAME_SIZE = 10240, /* max MAC frame size, including header + FCS */
EEPROMSIZE = 8192, /* Serial EEPROM size */
RSS_TABLE_SIZE = 64, /* size of RSS lookup and mapping tables */
TCB_SIZE = 128, /* TCB size */
NMTUS = 16, /* size of MTU table */
NCCTRL_WIN = 32, /* # of congestion control windows */
};
#define MAX_RX_COALESCING_LEN 16224U
enum {
PAUSE_RX = 1 << 0,
PAUSE_TX = 1 << 1,
PAUSE_AUTONEG = 1 << 2
};
enum {
SUPPORTED_OFFLOAD = 1 << 24,
SUPPORTED_IRQ = 1 << 25
};
enum { /* adapter interrupt-maintained statistics */
STAT_ULP_CH0_PBL_OOB,
STAT_ULP_CH1_PBL_OOB,
STAT_PCI_CORR_ECC,
IRQ_NUM_STATS /* keep last */
};
enum {
SGE_QSETS = 8, /* # of SGE Tx/Rx/RspQ sets */
SGE_RXQ_PER_SET = 2, /* # of Rx queues per set */
SGE_TXQ_PER_SET = 3 /* # of Tx queues per set */
};
enum sge_context_type { /* SGE egress context types */
SGE_CNTXT_RDMA = 0,
SGE_CNTXT_ETH = 2,
SGE_CNTXT_OFLD = 4,
SGE_CNTXT_CTRL = 5
};
enum {
AN_PKT_SIZE = 32, /* async notification packet size */
IMMED_PKT_SIZE = 48 /* packet size for immediate data */
};
struct sg_ent { /* SGE scatter/gather entry */
u32 len[2];
u64 addr[2];
};
#ifndef SGE_NUM_GENBITS
/* Must be 1 or 2 */
# define SGE_NUM_GENBITS 2
#endif
#define TX_DESC_FLITS 16U
#define WR_FLITS (TX_DESC_FLITS + 1 - SGE_NUM_GENBITS)
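/* With the default SGE_NUM_GENBITS of 2, WR_FLITS is 16 + 1 - 2 = 15. */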
struct cphy;
struct adapter;
struct mdio_ops {
int (*read)(struct adapter *adapter, int phy_addr, int mmd_addr,
int reg_addr, unsigned int *val);
int (*write)(struct adapter *adapter, int phy_addr, int mmd_addr,
int reg_addr, unsigned int val);
};
struct adapter_info {
unsigned char nports; /* # of ports */
unsigned char phy_base_addr; /* MDIO PHY base address */
unsigned char mdien;
unsigned char mdiinv;
unsigned int gpio_out; /* GPIO output settings */
unsigned int gpio_intr; /* GPIO IRQ enable mask */
unsigned long caps; /* adapter capabilities */
const struct mdio_ops *mdio_ops; /* MDIO operations */
const char *desc; /* product description */
};
struct port_type_info {
void (*phy_prep)(struct cphy *phy, struct adapter *adapter,
int phy_addr, const struct mdio_ops *ops);
unsigned int caps;
const char *desc;
};
struct mc5_stats {
unsigned long parity_err;
unsigned long active_rgn_full;
unsigned long nfa_srch_err;
unsigned long unknown_cmd;
unsigned long reqq_parity_err;
unsigned long dispq_parity_err;
unsigned long del_act_empty;
};
struct mc7_stats {
unsigned long corr_err;
unsigned long uncorr_err;
unsigned long parity_err;
unsigned long addr_err;
};
struct mac_stats {
u64 tx_octets; /* total # of octets in good frames */
u64 tx_octets_bad; /* total # of octets in error frames */
u64 tx_frames; /* all good frames */
u64 tx_mcast_frames; /* good multicast frames */
u64 tx_bcast_frames; /* good broadcast frames */
u64 tx_pause; /* # of transmitted pause frames */
u64 tx_deferred; /* frames with deferred transmissions */
u64 tx_late_collisions; /* # of late collisions */
u64 tx_total_collisions; /* # of total collisions */
u64 tx_excess_collisions; /* frame errors from excessive collisions */
u64 tx_underrun; /* # of Tx FIFO underruns */
u64 tx_len_errs; /* # of Tx length errors */
u64 tx_mac_internal_errs; /* # of internal MAC errors on Tx */
u64 tx_excess_deferral; /* # of frames with excessive deferral */
u64 tx_fcs_errs; /* # of frames with bad FCS */
u64 tx_frames_64; /* # of Tx frames in a particular range */
u64 tx_frames_65_127;
u64 tx_frames_128_255;
u64 tx_frames_256_511;
u64 tx_frames_512_1023;
u64 tx_frames_1024_1518;
u64 tx_frames_1519_max;
u64 rx_octets; /* total # of octets in good frames */
u64 rx_octets_bad; /* total # of octets in error frames */
u64 rx_frames; /* all good frames */
u64 rx_mcast_frames; /* good multicast frames */
u64 rx_bcast_frames; /* good broadcast frames */
u64 rx_pause; /* # of received pause frames */
u64 rx_fcs_errs; /* # of received frames with bad FCS */
u64 rx_align_errs; /* alignment errors */
u64 rx_symbol_errs; /* symbol errors */
u64 rx_data_errs; /* data errors */
u64 rx_sequence_errs; /* sequence errors */
u64 rx_runt; /* # of runt frames */
u64 rx_jabber; /* # of jabber frames */
u64 rx_short; /* # of short frames */
u64 rx_too_long; /* # of oversized frames */
u64 rx_mac_internal_errs; /* # of internal MAC errors on Rx */
u64 rx_frames_64; /* # of Rx frames in a particular range */
u64 rx_frames_65_127;
u64 rx_frames_128_255;
u64 rx_frames_256_511;
u64 rx_frames_512_1023;
u64 rx_frames_1024_1518;
u64 rx_frames_1519_max;
u64 rx_cong_drops; /* # of Rx drops due to SGE congestion */
unsigned long tx_fifo_parity_err;
unsigned long rx_fifo_parity_err;
unsigned long tx_fifo_urun;
unsigned long rx_fifo_ovfl;
unsigned long serdes_signal_loss;
unsigned long xaui_pcs_ctc_err;
unsigned long xaui_pcs_align_change;
};
struct tp_mib_stats {
u32 ipInReceive_hi;
u32 ipInReceive_lo;
u32 ipInHdrErrors_hi;
u32 ipInHdrErrors_lo;
u32 ipInAddrErrors_hi;
u32 ipInAddrErrors_lo;
u32 ipInUnknownProtos_hi;
u32 ipInUnknownProtos_lo;
u32 ipInDiscards_hi;
u32 ipInDiscards_lo;
u32 ipInDelivers_hi;
u32 ipInDelivers_lo;
u32 ipOutRequests_hi;
u32 ipOutRequests_lo;
u32 ipOutDiscards_hi;
u32 ipOutDiscards_lo;
u32 ipOutNoRoutes_hi;
u32 ipOutNoRoutes_lo;
u32 ipReasmTimeout;
u32 ipReasmReqds;
u32 ipReasmOKs;
u32 ipReasmFails;
u32 reserved[8];
u32 tcpActiveOpens;
u32 tcpPassiveOpens;
u32 tcpAttemptFails;
u32 tcpEstabResets;
u32 tcpOutRsts;
u32 tcpCurrEstab;
u32 tcpInSegs_hi;
u32 tcpInSegs_lo;
u32 tcpOutSegs_hi;
u32 tcpOutSegs_lo;
u32 tcpRetransSeg_hi;
u32 tcpRetransSeg_lo;
u32 tcpInErrs_hi;
u32 tcpInErrs_lo;
u32 tcpRtoMin;
u32 tcpRtoMax;
};
struct tp_params {
unsigned int nchan; /* # of channels */
unsigned int pmrx_size; /* total PMRX capacity */
unsigned int pmtx_size; /* total PMTX capacity */
unsigned int cm_size; /* total CM capacity */
unsigned int chan_rx_size; /* per channel Rx size */
unsigned int chan_tx_size; /* per channel Tx size */
unsigned int rx_pg_size; /* Rx page size */
unsigned int tx_pg_size; /* Tx page size */
unsigned int rx_num_pgs; /* # of Rx pages */
unsigned int tx_num_pgs; /* # of Tx pages */
unsigned int ntimer_qs; /* # of timer queues */
};
struct qset_params { /* SGE queue set parameters */
unsigned int polling; /* polling/interrupt service for rspq */
unsigned int coalesce_usecs; /* irq coalescing timer */
unsigned int rspq_size; /* # of entries in response queue */
unsigned int fl_size; /* # of entries in regular free list */
unsigned int jumbo_size; /* # of entries in jumbo free list */
unsigned int txq_size[SGE_TXQ_PER_SET]; /* Tx queue sizes */
unsigned int cong_thres; /* FL congestion threshold */
};
struct sge_params {
unsigned int max_pkt_size; /* max offload pkt size */
struct qset_params qset[SGE_QSETS];
};
struct mc5_params {
unsigned int mode; /* selects MC5 width */
unsigned int nservers; /* size of server region */
unsigned int nfilters; /* size of filter region */
unsigned int nroutes; /* size of routing region */
};
/* Default MC5 region sizes */
enum {
DEFAULT_NSERVERS = 512,
DEFAULT_NFILTERS = 128
};
/* MC5 modes, these must be non-0 */
enum {
MC5_MODE_144_BIT = 1,
MC5_MODE_72_BIT = 2
};
struct vpd_params {
unsigned int cclk;
unsigned int mclk;
unsigned int uclk;
unsigned int mdc;
unsigned int mem_timing;
u8 eth_base[6];
u8 port_type[MAX_NPORTS];
unsigned short xauicfg[2];
};
struct pci_params {
unsigned int vpd_cap_addr;
unsigned int pcie_cap_addr;
unsigned short speed;
unsigned char width;
unsigned char variant;
};
enum {
PCI_VARIANT_PCI,
PCI_VARIANT_PCIX_MODE1_PARITY,
PCI_VARIANT_PCIX_MODE1_ECC,
PCI_VARIANT_PCIX_266_MODE2,
PCI_VARIANT_PCIE
};
struct adapter_params {
struct sge_params sge;
struct mc5_params mc5;
struct tp_params tp;
struct vpd_params vpd;
struct pci_params pci;
const struct adapter_info *info;
unsigned short mtus[NMTUS];
unsigned short a_wnd[NCCTRL_WIN];
unsigned short b_wnd[NCCTRL_WIN];
unsigned int nports; /* # of ethernet ports */
unsigned int stats_update_period; /* MAC stats accumulation period */
unsigned int linkpoll_period; /* link poll period in 0.1s */
unsigned int rev; /* chip revision */
};
struct trace_params {
u32 sip;
u32 sip_mask;
u32 dip;
u32 dip_mask;
u16 sport;
u16 sport_mask;
u16 dport;
u16 dport_mask;
u32 vlan:12;
u32 vlan_mask:12;
u32 intf:4;
u32 intf_mask:4;
u8 proto;
u8 proto_mask;
};
struct link_config {
unsigned int supported; /* link capabilities */
unsigned int advertising; /* advertised capabilities */
unsigned short requested_speed; /* speed user has requested */
unsigned short speed; /* actual link speed */
unsigned char requested_duplex; /* duplex user has requested */
unsigned char duplex; /* actual link duplex */
unsigned char requested_fc; /* flow control user has requested */
unsigned char fc; /* actual link flow control */
unsigned char autoneg; /* autonegotiating? */
unsigned int link_ok; /* link up? */
};
#define SPEED_INVALID 0xffff
#define DUPLEX_INVALID 0xff
struct mc5 {
struct adapter *adapter;
unsigned int tcam_size;
unsigned char part_type;
unsigned char parity_enabled;
unsigned char mode;
struct mc5_stats stats;
};
static inline unsigned int t3_mc5_size(const struct mc5 *p)
{
return p->tcam_size;
}
struct mc7 {
struct adapter *adapter; /* backpointer to adapter */
unsigned int size; /* memory size in bytes */
unsigned int width; /* MC7 interface width */
unsigned int offset; /* register address offset for MC7 instance */
const char *name; /* name of MC7 instance */
struct mc7_stats stats; /* MC7 statistics */
};
static inline unsigned int t3_mc7_size(const struct mc7 *p)
{
return p->size;
}
struct cmac {
struct adapter *adapter;
unsigned int offset;
unsigned int nucast; /* # of address filters for unicast MACs */
struct mac_stats stats;
};
enum {
MAC_DIRECTION_RX = 1,
MAC_DIRECTION_TX = 2,
MAC_RXFIFO_SIZE = 32768
};
/* IEEE 802.3ae specified MDIO devices */
enum {
MDIO_DEV_PMA_PMD = 1,
MDIO_DEV_WIS = 2,
MDIO_DEV_PCS = 3,
MDIO_DEV_XGXS = 4
};
/* PHY loopback direction */
enum {
PHY_LOOPBACK_TX = 1,
PHY_LOOPBACK_RX = 2
};
/* PHY interrupt types */
enum {
cphy_cause_link_change = 1,
cphy_cause_fifo_error = 2
};
/* PHY operations */
struct cphy_ops {
void (*destroy)(struct cphy *phy);
int (*reset)(struct cphy *phy, int wait);
int (*intr_enable)(struct cphy *phy);
int (*intr_disable)(struct cphy *phy);
int (*intr_clear)(struct cphy *phy);
int (*intr_handler)(struct cphy *phy);
int (*autoneg_enable)(struct cphy *phy);
int (*autoneg_restart)(struct cphy *phy);
int (*advertise)(struct cphy *phy, unsigned int advertise_map);
int (*set_loopback)(struct cphy *phy, int mmd, int dir, int enable);
int (*set_speed_duplex)(struct cphy *phy, int speed, int duplex);
int (*get_link_status)(struct cphy *phy, int *link_ok, int *speed,
int *duplex, int *fc);
int (*power_down)(struct cphy *phy, int enable);
};
/* A PHY instance */
struct cphy {
int addr; /* PHY address */
struct adapter *adapter; /* associated adapter */
unsigned long fifo_errors; /* FIFO over/under-flows */
const struct cphy_ops *ops; /* PHY operations */
int (*mdio_read)(struct adapter *adapter, int phy_addr, int mmd_addr,
int reg_addr, unsigned int *val);
int (*mdio_write)(struct adapter *adapter, int phy_addr, int mmd_addr,
int reg_addr, unsigned int val);
};
/* Convenience MDIO read/write wrappers */
static inline int mdio_read(struct cphy *phy, int mmd, int reg,
unsigned int *valp)
{
return phy->mdio_read(phy->adapter, phy->addr, mmd, reg, valp);
}
static inline int mdio_write(struct cphy *phy, int mmd, int reg,
unsigned int val)
{
return phy->mdio_write(phy->adapter, phy->addr, mmd, reg, val);
}
/* Convenience initializer */
static inline void cphy_init(struct cphy *phy, struct adapter *adapter,
int phy_addr, struct cphy_ops *phy_ops,
const struct mdio_ops *mdio_ops)
{
phy->adapter = adapter;
phy->addr = phy_addr;
phy->ops = phy_ops;
if (mdio_ops) {
phy->mdio_read = mdio_ops->read;
phy->mdio_write = mdio_ops->write;
}
}
/* Accumulate MAC statistics every 180 seconds. For 1G we multiply by 10. */
#define MAC_STATS_ACCUM_SECS 180
#define XGM_REG(reg_addr, idx) \
((reg_addr) + (idx) * (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR))
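/*
* Example (editor's note): XGM_REG(A_XGM_SERDES_STAT0, 1) resolves to the
* instance-0 register address plus the spacing between the two XGMAC
* register blocks, i.e. the same register in XGMAC instance 1.
*/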
struct addr_val_pair {
unsigned int reg_addr;
unsigned int val;
};
#include "adapter.h"
#ifndef PCI_VENDOR_ID_CHELSIO
# define PCI_VENDOR_ID_CHELSIO 0x1425
#endif
#define for_each_port(adapter, iter) \
for (iter = 0; iter < (adapter)->params.nports; ++iter)
#define adapter_info(adap) ((adap)->params.info)
static inline int uses_xaui(const struct adapter *adap)
{
return adapter_info(adap)->caps & SUPPORTED_AUI;
}
static inline int is_10G(const struct adapter *adap)
{
return adapter_info(adap)->caps & SUPPORTED_10000baseT_Full;
}
static inline int is_offload(const struct adapter *adap)
{
return adapter_info(adap)->caps & SUPPORTED_OFFLOAD;
}
static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
{
return adap->params.vpd.cclk / 1000;
}
static inline unsigned int is_pcie(const struct adapter *adap)
{
return adap->params.pci.variant == PCI_VARIANT_PCIE;
}
void t3_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
u32 val);
void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
int n, unsigned int offset);
int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
int polarity, int attempts, int delay, u32 *valp);
static inline int t3_wait_op_done(struct adapter *adapter, int reg, u32 mask,
int polarity, int attempts, int delay)
{
return t3_wait_op_done_val(adapter, reg, mask, polarity, attempts,
delay, NULL);
}
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
unsigned int set);
int t3_phy_reset(struct cphy *phy, int mmd, int wait);
int t3_phy_advertise(struct cphy *phy, unsigned int advert);
int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex);
void t3_intr_enable(struct adapter *adapter);
void t3_intr_disable(struct adapter *adapter);
void t3_intr_clear(struct adapter *adapter);
void t3_port_intr_enable(struct adapter *adapter, int idx);
void t3_port_intr_disable(struct adapter *adapter, int idx);
void t3_port_intr_clear(struct adapter *adapter, int idx);
int t3_slow_intr_handler(struct adapter *adapter);
int t3_phy_intr_handler(struct adapter *adapter);
void t3_link_changed(struct adapter *adapter, int port_id);
int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
const struct adapter_info *t3_get_adapter_info(unsigned int board_id);
int t3_seeprom_read(struct adapter *adapter, u32 addr, u32 *data);
int t3_seeprom_write(struct adapter *adapter, u32 addr, u32 data);
int t3_seeprom_wp(struct adapter *adapter, int enable);
int t3_read_flash(struct adapter *adapter, unsigned int addr,
unsigned int nwords, u32 *data, int byte_oriented);
int t3_load_fw(struct adapter *adapter, const u8 * fw_data, unsigned int size);
int t3_get_fw_version(struct adapter *adapter, u32 *vers);
int t3_check_fw_version(struct adapter *adapter);
int t3_init_hw(struct adapter *adapter, u32 fw_params);
void mac_prep(struct cmac *mac, struct adapter *adapter, int index);
void early_hw_init(struct adapter *adapter, const struct adapter_info *ai);
int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
int reset);
void t3_led_ready(struct adapter *adapter);
void t3_fatal_err(struct adapter *adapter);
void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
const u8 * cpus, const u16 *rspq);
int t3_read_rss(struct adapter *adapter, u8 * lkup, u16 *map);
int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask);
int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
unsigned int n, unsigned int *valp);
int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
u64 *buf);
int t3_mac_reset(struct cmac *mac);
void t3b_pcs_reset(struct cmac *mac);
int t3_mac_enable(struct cmac *mac, int which);
int t3_mac_disable(struct cmac *mac, int which);
int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu);
int t3_mac_set_rx_mode(struct cmac *mac, struct t3_rx_mode *rm);
int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6]);
int t3_mac_set_num_ucast(struct cmac *mac, int n);
const struct mac_stats *t3_mac_update_stats(struct cmac *mac);
int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc);
void t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode);
int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters,
unsigned int nroutes);
void t3_mc5_intr_handler(struct mc5 *mc5);
int t3_read_mc5_range(const struct mc5 *mc5, unsigned int start, unsigned int n,
u32 *buf);
int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh);
void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size);
void t3_tp_set_offload_mode(struct adapter *adap, int enable);
void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps);
void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
unsigned short alpha[NCCTRL_WIN],
unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap);
void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS]);
void t3_get_cong_cntl_tab(struct adapter *adap,
unsigned short incr[NMTUS][NCCTRL_WIN]);
void t3_config_trace_filter(struct adapter *adapter,
const struct trace_params *tp, int filter_index,
int invert, int enable);
int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched);
void t3_sge_prep(struct adapter *adap, struct sge_params *p);
void t3_sge_init(struct adapter *adap, struct sge_params *p);
int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
enum sge_context_type type, int respq, u64 base_addr,
unsigned int size, unsigned int token, int gen,
unsigned int cidx);
int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
int gts_enable, u64 base_addr, unsigned int size,
unsigned int esize, unsigned int cong_thres, int gen,
unsigned int cidx);
int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
int irq_vec_idx, u64 base_addr, unsigned int size,
unsigned int fl_thres, int gen, unsigned int cidx);
int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
unsigned int size, int rspq, int ovfl_mode,
unsigned int credits, unsigned int credit_thres);
int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable);
int t3_sge_disable_fl(struct adapter *adapter, unsigned int id);
int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id);
int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id);
int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4]);
int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4]);
int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4]);
int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4]);
int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
unsigned int credits);
void t3_vsc8211_phy_prep(struct cphy *phy, struct adapter *adapter,
int phy_addr, const struct mdio_ops *mdio_ops);
void t3_ael1002_phy_prep(struct cphy *phy, struct adapter *adapter,
int phy_addr, const struct mdio_ops *mdio_ops);
void t3_ael1006_phy_prep(struct cphy *phy, struct adapter *adapter,
int phy_addr, const struct mdio_ops *mdio_ops);
void t3_qt2045_phy_prep(struct cphy *phy, struct adapter *adapter, int phy_addr,
const struct mdio_ops *mdio_ops);
void t3_xaui_direct_phy_prep(struct cphy *phy, struct adapter *adapter,
int phy_addr, const struct mdio_ops *mdio_ops);
#endif /* __CHELSIO_COMMON_H */

drivers/net/cxgb3/cxgb3_ctl_defs.h Normal file

@@ -0,0 +1,142 @@
/*
* Copyright (C) 2003-2006 Chelsio Communications. All rights reserved.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
* release for licensing terms and conditions.
*/
#ifndef _CXGB3_OFFLOAD_CTL_DEFS_H
#define _CXGB3_OFFLOAD_CTL_DEFS_H
enum {
GET_MAX_OUTSTANDING_WR,
GET_TX_MAX_CHUNK,
GET_TID_RANGE,
GET_STID_RANGE,
GET_RTBL_RANGE,
GET_L2T_CAPACITY,
GET_MTUS,
GET_WR_LEN,
GET_IFF_FROM_MAC,
GET_DDP_PARAMS,
GET_PORTS,
ULP_ISCSI_GET_PARAMS,
ULP_ISCSI_SET_PARAMS,
RDMA_GET_PARAMS,
RDMA_CQ_OP,
RDMA_CQ_SETUP,
RDMA_CQ_DISABLE,
RDMA_CTRL_QP_SETUP,
RDMA_GET_MEM,
};
/*
* Structure used to describe a TID range. Valid TIDs are [base, base+num).
*/
struct tid_range {
unsigned int base; /* first TID */
unsigned int num; /* number of TIDs in range */
};
/*
* Structure used to request the size and contents of the MTU table.
*/
struct mtutab {
unsigned int size; /* # of entries in the MTU table */
const unsigned short *mtus; /* the MTU table values */
};
struct net_device;
/*
* Structure used to request the adapter net_device owning a given MAC address.
*/
struct iff_mac {
struct net_device *dev; /* the net_device */
const unsigned char *mac_addr; /* MAC address to lookup */
u16 vlan_tag;
};
struct pci_dev;
/*
* Structure used to request the TCP DDP parameters.
*/
struct ddp_params {
unsigned int llimit; /* TDDP region start address */
unsigned int ulimit; /* TDDP region end address */
unsigned int tag_mask; /* TDDP tag mask */
struct pci_dev *pdev;
};
struct adap_ports {
unsigned int nports; /* number of ports on this adapter */
struct net_device *lldevs[2];
};
/*
* Structure used to return information to the iscsi layer.
*/
struct ulp_iscsi_info {
unsigned int offset;
unsigned int llimit;
unsigned int ulimit;
unsigned int tagmask;
unsigned int pgsz3;
unsigned int pgsz2;
unsigned int pgsz1;
unsigned int pgsz0;
unsigned int max_rxsz;
unsigned int max_txsz;
struct pci_dev *pdev;
};
/*
* Structure used to return information to the RDMA layer.
*/
struct rdma_info {
unsigned int tpt_base; /* TPT base address */
unsigned int tpt_top; /* TPT last entry address */
unsigned int pbl_base; /* PBL base address */
unsigned int pbl_top; /* PBL last entry address */
unsigned int rqt_base; /* RQT base address */
unsigned int rqt_top; /* RQT last entry address */
unsigned int udbell_len; /* user doorbell region length */
unsigned long udbell_physbase; /* user doorbell physical start addr */
void __iomem *kdb_addr; /* kernel doorbell register address */
struct pci_dev *pdev; /* associated PCI device */
};
/*
* Structure used to request an operation on an RDMA completion queue.
*/
struct rdma_cq_op {
unsigned int id;
unsigned int op;
unsigned int credits;
};
/*
* Structure used to setup RDMA completion queues.
*/
struct rdma_cq_setup {
unsigned int id;
unsigned long long base_addr;
unsigned int size;
unsigned int credits;
unsigned int credit_thres;
unsigned int ovfl_mode;
};
/*
* Structure used to setup the RDMA control egress context.
*/
struct rdma_ctrlqp_setup {
unsigned long long base_addr;
unsigned int size;
};
#endif /* _CXGB3_OFFLOAD_CTL_DEFS_H */

drivers/net/cxgb3/cxgb3_defs.h Normal file

@@ -0,0 +1,99 @@
/*
* Copyright (c) 2006 Chelsio, Inc. All rights reserved.
* Copyright (c) 2006 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _CHELSIO_DEFS_H
#define _CHELSIO_DEFS_H
#include <linux/skbuff.h>
#include <net/tcp.h>
#include "t3cdev.h"
#include "cxgb3_offload.h"
#define VALIDATE_TID 1
void *cxgb_alloc_mem(unsigned long size);
void cxgb_free_mem(void *addr);
void cxgb_neigh_update(struct neighbour *neigh);
void cxgb_redirect(struct dst_entry *old, struct dst_entry *new);
/*
* Map an ATID or STID to its entry in the corresponding TID table.
*/
static inline union active_open_entry *atid2entry(const struct tid_info *t,
unsigned int atid)
{
return &t->atid_tab[atid - t->atid_base];
}
static inline union listen_entry *stid2entry(const struct tid_info *t,
unsigned int stid)
{
return &t->stid_tab[stid - t->stid_base];
}
/*
* Find the connection corresponding to a TID.
*/
static inline struct t3c_tid_entry *lookup_tid(const struct tid_info *t,
unsigned int tid)
{
return tid < t->ntids ? &(t->tid_tab[tid]) : NULL;
}
/*
* Find the connection corresponding to a server TID.
*/
static inline struct t3c_tid_entry *lookup_stid(const struct tid_info *t,
unsigned int tid)
{
if (tid < t->stid_base || tid >= t->stid_base + t->nstids)
return NULL;
return &(stid2entry(t, tid)->t3c_tid);
}
/*
* Find the connection corresponding to an active-open TID.
*/
static inline struct t3c_tid_entry *lookup_atid(const struct tid_info *t,
unsigned int tid)
{
if (tid < t->atid_base || tid >= t->atid_base + t->natids)
return NULL;
return &(atid2entry(t, tid)->t3c_tid);
}
int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n);
int attach_t3cdev(struct t3cdev *dev);
void detach_t3cdev(struct t3cdev *dev);
#endif

drivers/net/cxgb3/cxgb3_ioctl.h Normal file

@@ -0,0 +1,165 @@
/*
* This file is part of the Chelsio T3 Ethernet driver for Linux.
*
* Copyright (C) 2003-2006 Chelsio Communications. All rights reserved.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
* release for licensing terms and conditions.
*/
#ifndef __CHIOCTL_H__
#define __CHIOCTL_H__
/*
* Ioctl commands specific to this driver.
*/
enum {
CHELSIO_SETREG = 1024,
CHELSIO_GETREG,
CHELSIO_SETTPI,
CHELSIO_GETTPI,
CHELSIO_GETMTUTAB,
CHELSIO_SETMTUTAB,
CHELSIO_GETMTU,
CHELSIO_SET_PM,
CHELSIO_GET_PM,
CHELSIO_GET_TCAM,
CHELSIO_SET_TCAM,
CHELSIO_GET_TCB,
CHELSIO_GET_MEM,
CHELSIO_LOAD_FW,
CHELSIO_GET_PROTO,
CHELSIO_SET_PROTO,
CHELSIO_SET_TRACE_FILTER,
CHELSIO_SET_QSET_PARAMS,
CHELSIO_GET_QSET_PARAMS,
CHELSIO_SET_QSET_NUM,
CHELSIO_GET_QSET_NUM,
CHELSIO_SET_PKTSCHED,
};
struct ch_reg {
uint32_t cmd;
uint32_t addr;
uint32_t val;
};
struct ch_cntxt {
uint32_t cmd;
uint32_t cntxt_type;
uint32_t cntxt_id;
uint32_t data[4];
};
/* context types */
enum { CNTXT_TYPE_EGRESS, CNTXT_TYPE_FL, CNTXT_TYPE_RSP, CNTXT_TYPE_CQ };
struct ch_desc {
uint32_t cmd;
uint32_t queue_num;
uint32_t idx;
uint32_t size;
uint8_t data[128];
};
struct ch_mem_range {
uint32_t cmd;
uint32_t mem_id;
uint32_t addr;
uint32_t len;
uint32_t version;
uint8_t buf[0];
};
struct ch_qset_params {
uint32_t cmd;
uint32_t qset_idx;
int32_t txq_size[3];
int32_t rspq_size;
int32_t fl_size[2];
int32_t intr_lat;
int32_t polling;
int32_t cong_thres;
};
struct ch_pktsched_params {
uint32_t cmd;
uint8_t sched;
uint8_t idx;
uint8_t min;
uint8_t max;
uint8_t binding;
};
#ifndef TCB_SIZE
# define TCB_SIZE 128
#endif
/* TCB size in 32-bit words */
#define TCB_WORDS (TCB_SIZE / 4)
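/* With the default TCB_SIZE of 128 bytes, TCB_WORDS is 128 / 4 = 32. */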
enum { MEM_CM, MEM_PMRX, MEM_PMTX }; /* ch_mem_range.mem_id values */
struct ch_mtus {
uint32_t cmd;
uint32_t nmtus;
uint16_t mtus[NMTUS];
};
struct ch_pm {
uint32_t cmd;
uint32_t tx_pg_sz;
uint32_t tx_num_pg;
uint32_t rx_pg_sz;
uint32_t rx_num_pg;
uint32_t pm_total;
};
struct ch_tcam {
uint32_t cmd;
uint32_t tcam_size;
uint32_t nservers;
uint32_t nroutes;
uint32_t nfilters;
};
struct ch_tcb {
uint32_t cmd;
uint32_t tcb_index;
uint32_t tcb_data[TCB_WORDS];
};
struct ch_tcam_word {
uint32_t cmd;
uint32_t addr;
uint32_t buf[3];
};
struct ch_trace {
uint32_t cmd;
uint32_t sip;
uint32_t sip_mask;
uint32_t dip;
uint32_t dip_mask;
uint16_t sport;
uint16_t sport_mask;
uint16_t dport;
uint16_t dport_mask;
uint32_t vlan:12;
uint32_t vlan_mask:12;
uint32_t intf:4;
uint32_t intf_mask:4;
uint8_t proto;
uint8_t proto_mask;
uint8_t invert_match:1;
uint8_t config_tx:1;
uint8_t config_rx:1;
uint8_t trace_tx:1;
uint8_t trace_rx:1;
};
#define SIOCCHIOCTL SIOCDEVPRIVATE
#endif

File diff suppressed because it is too large.

File diff suppressed because it is too large.

drivers/net/cxgb3/cxgb3_offload.h Normal file

@@ -0,0 +1,193 @@
/*
* Copyright (c) 2006 Chelsio, Inc. All rights reserved.
* Copyright (c) 2006 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _CXGB3_OFFLOAD_H
#define _CXGB3_OFFLOAD_H
#include <linux/list.h>
#include <linux/skbuff.h>
#include "l2t.h"
#include "t3cdev.h"
#include "t3_cpl.h"
struct adapter;
void cxgb3_offload_init(void);
void cxgb3_adapter_ofld(struct adapter *adapter);
void cxgb3_adapter_unofld(struct adapter *adapter);
int cxgb3_offload_activate(struct adapter *adapter);
void cxgb3_offload_deactivate(struct adapter *adapter);
void cxgb3_set_dummy_ops(struct t3cdev *dev);
/*
* Client registration. Users of the T3 driver must register themselves.
* The T3 driver will call the add function of every client for each T3
* adapter activated, passing up the t3cdev ptr. Each client fills out an
* array of callback functions to process CPL messages.
*/
void cxgb3_register_client(struct cxgb3_client *client);
void cxgb3_unregister_client(struct cxgb3_client *client);
void cxgb3_add_clients(struct t3cdev *tdev);
void cxgb3_remove_clients(struct t3cdev *tdev);
typedef int (*cxgb3_cpl_handler_func)(struct t3cdev *dev,
struct sk_buff *skb, void *ctx);
struct cxgb3_client {
char *name;
void (*add) (struct t3cdev *);
void (*remove) (struct t3cdev *);
cxgb3_cpl_handler_func *handlers;
int (*redirect)(void *ctx, struct dst_entry *old,
struct dst_entry *new, struct l2t_entry *l2t);
struct list_head client_list;
};
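/*
* Registration sketch (editor's illustration; my_ulp, my_add() and
* my_remove() are hypothetical). A client fills in a cxgb3_client and
* registers it once; the driver then invokes ->add() for every active
* adapter, passing up the t3cdev pointer:
*
*	static void my_add(struct t3cdev *tdev) { ... }
*	static void my_remove(struct t3cdev *tdev) { ... }
*
*	static struct cxgb3_client my_client = {
*		.name	= "my_ulp",
*		.add	= my_add,
*		.remove	= my_remove,
*	};
*
*	cxgb3_register_client(&my_client);
*/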
/*
* TID allocation services.
*/
int cxgb3_alloc_atid(struct t3cdev *dev, struct cxgb3_client *client,
void *ctx);
int cxgb3_alloc_stid(struct t3cdev *dev, struct cxgb3_client *client,
void *ctx);
void *cxgb3_free_atid(struct t3cdev *dev, int atid);
void cxgb3_free_stid(struct t3cdev *dev, int stid);
void cxgb3_insert_tid(struct t3cdev *dev, struct cxgb3_client *client,
void *ctx, unsigned int tid);
void cxgb3_queue_tid_release(struct t3cdev *dev, unsigned int tid);
void cxgb3_remove_tid(struct t3cdev *dev, void *ctx, unsigned int tid);
struct t3c_tid_entry {
struct cxgb3_client *client;
void *ctx;
};
/* CPL message priority levels */
enum {
CPL_PRIORITY_DATA = 0, /* data messages */
CPL_PRIORITY_SETUP = 1, /* connection setup messages */
CPL_PRIORITY_TEARDOWN = 0, /* connection teardown messages */
CPL_PRIORITY_LISTEN = 1, /* listen start/stop messages */
CPL_PRIORITY_ACK = 1, /* RX ACK messages */
CPL_PRIORITY_CONTROL = 1 /* offload control messages */
};
/* Flags for return value of CPL message handlers */
enum {
CPL_RET_BUF_DONE = 1, /* buffer processing done, buffer may be freed */
CPL_RET_BAD_MSG = 2, /* bad CPL message (e.g., unknown opcode) */
CPL_RET_UNKNOWN_TID = 4 /* unexpected unknown TID */
};
typedef int (*cpl_handler_func)(struct t3cdev *dev, struct sk_buff *skb);
/*
* Returns a pointer to the first byte of the CPL header in an sk_buff that
* contains a CPL message.
*/
static inline void *cplhdr(struct sk_buff *skb)
{
return skb->data;
}
void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h);
union listen_entry {
struct t3c_tid_entry t3c_tid;
union listen_entry *next;
};
union active_open_entry {
struct t3c_tid_entry t3c_tid;
union active_open_entry *next;
};
/*
* Holds the size, base address, free list start, etc. of the TID, server TID,
* and active-open TID tables for an offload device.
* The tables themselves are allocated dynamically.
*/
struct tid_info {
struct t3c_tid_entry *tid_tab;
unsigned int ntids;
atomic_t tids_in_use;
union listen_entry *stid_tab;
unsigned int nstids;
unsigned int stid_base;
union active_open_entry *atid_tab;
unsigned int natids;
unsigned int atid_base;
/*
* The following members are accessed R/W so we put them in their own
* cache lines.
*
* XXX We could combine the atid fields above with the lock here since
* atids are used once (unlike other tids). OTOH the above fields are
* usually in cache due to tid_tab.
*/
spinlock_t atid_lock ____cacheline_aligned_in_smp;
union active_open_entry *afree;
unsigned int atids_in_use;
spinlock_t stid_lock ____cacheline_aligned;
union listen_entry *sfree;
unsigned int stids_in_use;
};
struct t3c_data {
struct list_head list_node;
struct t3cdev *dev;
unsigned int tx_max_chunk; /* max payload for TX_DATA */
unsigned int max_wrs; /* max in-flight WRs per connection */
unsigned int nmtus;
const unsigned short *mtus;
struct tid_info tid_maps;
struct t3c_tid_entry *tid_release_list;
spinlock_t tid_release_lock;
struct work_struct tid_release_task;
};
/*
* t3cdev -> t3c_data accessor
*/
#define T3C_DATA(dev) (*(struct t3c_data **)&(dev)->l4opt)
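/*
* Example (editor's note): the macro is an l-value, so both directions work:
*
*	struct t3c_data *t = T3C_DATA(tdev);	fetch in a CPL handler
*	T3C_DATA(tdev) = t;			set at activation time
*/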
#endif

drivers/net/cxgb3/firmware_exports.h Normal file

@@ -0,0 +1,144 @@
/*
* ----------------------------------------------------------------------------
* >>>>>>>>>>>>>>>>>>>>>>>>>>>>> COPYRIGHT NOTICE <<<<<<<<<<<<<<<<<<<<<<<<<<<<<
* ----------------------------------------------------------------------------
* Copyright 2004 (C) Chelsio Communications, Inc. (Chelsio)
*
* Chelsio Communications, Inc. owns the sole copyright to this software.
* You may not make a copy, you may not derive works herefrom, and you may
* not distribute this work to others. Other restrictions of rights may apply
* as well. This is unpublished, confidential information. All rights reserved.
* This software contains confidential information and trade secrets of Chelsio
* Communications, Inc. Use, disclosure, or reproduction is prohibited without
* the prior express written permission of Chelsio Communications, Inc.
* ----------------------------------------------------------------------------
* >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Warranty <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
* ----------------------------------------------------------------------------
* CHELSIO MAKES NO WARRANTY OF ANY KIND WITH REGARD TO THE USE OF THIS
* SOFTWARE, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
* ----------------------------------------------------------------------------
*
* This is the firmware_exports.h header file, firmware interface defines.
*
* Written January 2005 by Felix Marti (felix@chelsio.com)
*/
#ifndef _FIRMWARE_EXPORTS_H_
#define _FIRMWARE_EXPORTS_H_
/* WR OPCODES supported by the firmware.
*/
#define FW_WROPCODE_FORWARD 0x01
#define FW_WROPCODE_BYPASS 0x05
#define FW_WROPCODE_TUNNEL_TX_PKT 0x03
#define FW_WROPCODE_ULPTX_DATA_SGL 0x00
#define FW_WROPCODE_ULPTX_MEM_READ 0x02
#define FW_WROPCODE_ULPTX_PKT 0x04
#define FW_WROPCODE_ULPTX_INVALIDATE 0x06
#define FW_WROPCODE_TUNNEL_RX_PKT 0x07
#define FW_WROPCODE_OFLD_GETTCB_RPL 0x08
#define FW_WROPCODE_OFLD_CLOSE_CON 0x09
#define FW_WROPCODE_OFLD_TP_ABORT_CON_REQ 0x0A
#define FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL 0x0F
#define FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ 0x0B
#define FW_WROPCODE_OFLD_TP_ABORT_CON_RPL 0x0C
#define FW_WROPCODE_OFLD_TX_DATA 0x0D
#define FW_WROPCODE_OFLD_TX_DATA_ACK 0x0E
#define FW_WROPCODE_RI_RDMA_INIT 0x10
#define FW_WROPCODE_RI_RDMA_WRITE 0x11
#define FW_WROPCODE_RI_RDMA_READ_REQ 0x12
#define FW_WROPCODE_RI_RDMA_READ_RESP 0x13
#define FW_WROPCODE_RI_SEND 0x14
#define FW_WROPCODE_RI_TERMINATE 0x15
#define FW_WROPCODE_RI_RDMA_READ 0x16
#define FW_WROPCODE_RI_RECEIVE 0x17
#define FW_WROPCODE_RI_BIND_MW 0x18
#define FW_WROPCODE_RI_FASTREGISTER_MR 0x19
#define FW_WROPCODE_RI_LOCAL_INV 0x1A
#define FW_WROPCODE_RI_MODIFY_QP 0x1B
#define FW_WROPCODE_RI_BYPASS 0x1C
#define FW_WROPCODE_RSVD 0x1E
#define FW_WROPCODE_SGE_EGRESSCONTEXT_RR 0x1F
#define FW_WROPCODE_MNGT 0x1D
#define FW_MNGTOPCODE_PKTSCHED_SET 0x00
/* Maximum size of a WR sent from the host, limited by the SGE.
*
* Note: WRs coming from ULP or TP are limited only by the CIM.
*/
#define FW_WR_SIZE 128
/* Maximum number of outstanding WRs sent from the host. Value must be
* programmed in the CTRL/TUNNEL/QP SGE Egress Context and used by
* offload modules to limit the number of WRs per connection.
*/
#define FW_T3_WR_NUM 16
#define FW_N3_WR_NUM 7
#ifndef N3
# define FW_WR_NUM FW_T3_WR_NUM
#else
# define FW_WR_NUM FW_N3_WR_NUM
#endif
/* FW_TUNNEL_NUM corresponds to the number of supported TUNNEL Queues. These
* queues must start at SGE Egress Context FW_TUNNEL_SGEEC_START and must
* start at 'TID' (or 'uP Token') FW_TUNNEL_TID_START.
*
* Ingress Traffic (e.g. DMA completion credit) for TUNNEL Queue[i] is sent
* to RESP Queue[i].
*/
#define FW_TUNNEL_NUM 8
#define FW_TUNNEL_SGEEC_START 8
#define FW_TUNNEL_TID_START 65544
/* FW_CTRL_NUM corresponds to the number of supported CTRL Queues. These queues
* must start at SGE Egress Context FW_CTRL_SGEEC_START and must start at 'TID'
* (or 'uP Token') FW_CTRL_TID_START.
*
* Ingress Traffic for CTRL Queue[i] is sent to RESP Queue[i].
*/
#define FW_CTRL_NUM 8
#define FW_CTRL_SGEEC_START 65528
#define FW_CTRL_TID_START 65536
/* FW_OFLD_NUM corresponds to the number of supported OFFLOAD Queues. These
* queues must start at SGE Egress Context FW_OFLD_SGEEC_START.
*
* Note: the 'uP Token' in the SGE Egress Context fields is irrelevant for
* OFFLOAD Queues, as the host is responsible for providing the correct TID in
* every WR.
*
 * Ingress Traffic for OFFLOAD Queue[i] is sent to RESP Queue[i].
*/
#define FW_OFLD_NUM 8
#define FW_OFLD_SGEEC_START 0
/* FW_RI_NUM corresponds to the number of supported RI (RDMA Interface)
 * Queues. These queues must start at SGE Egress Context FW_RI_SGEEC_START
 * and at 'TID' FW_RI_TID_START.
 */
#define FW_RI_NUM 1
#define FW_RI_SGEEC_START 65527
#define FW_RI_TID_START 65552
/* The RX_PKT TID region: FW_RX_PKT_NUM TIDs starting at
 * FW_RX_PKT_TID_START.
 */
#define FW_RX_PKT_NUM 1
#define FW_RX_PKT_TID_START 65553
/* FW_WRC_NUM corresponds to the number of Work Request Contexts supported
 * by the firmware.
*/
#define FW_WRC_NUM \
(65536 + FW_TUNNEL_NUM + FW_CTRL_NUM + FW_RI_NUM + FW_RX_PKT_NUM)
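/* With the queue counts defined above this works out to
 * 65536 + 8 + 8 + 1 + 1 = 65554 WR contexts.
 */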
#endif /* _FIRMWARE_EXPORTS_H_ */

drivers/net/cxgb3/l2t.c Normal file

@@ -0,0 +1,450 @@
/*
* Copyright (c) 2006 Chelsio, Inc. All rights reserved.
* Copyright (c) 2006 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <net/neighbour.h>
#include "common.h"
#include "t3cdev.h"
#include "cxgb3_defs.h"
#include "l2t.h"
#include "t3_cpl.h"
#include "firmware_exports.h"
#define VLAN_NONE 0xfff
/*
* Module locking notes: There is a RW lock protecting the L2 table as a
* whole plus a spinlock per L2T entry. Entry lookups and allocations happen
* under the protection of the table lock, individual entry changes happen
* while holding that entry's spinlock. The table lock nests outside the
* entry locks. Allocations of new entries take the table lock as writers so
* no other lookups can happen while allocating new entries. Entry updates
* take the table lock as readers so multiple entries can be updated in
* parallel. An L2T entry can be dropped by decrementing its reference count
* and therefore can happen in parallel with entry allocation but no entry
* can change state or increment its ref count during allocation as both of
* these perform lookups.
*/
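/*
 * A lock-ordering sketch (illustration only, using the names below):
 *
 *	write_lock_bh(&d->lock);	allocation takes the table lock
 *	e = alloc_l2e(d);		as a writer,
 *	spin_lock(&e->lock);		and the entry lock nests inside
 *	... initialize the entry ...
 *	spin_unlock(&e->lock);
 *	write_unlock_bh(&d->lock);
 */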
static inline unsigned int vlan_prio(const struct l2t_entry *e)
{
return e->vlan >> 13;
}
static inline unsigned int arp_hash(u32 key, int ifindex,
const struct l2t_data *d)
{
return jhash_2words(key, ifindex, 0) & (d->nentries - 1);
}
static inline void neigh_replace(struct l2t_entry *e, struct neighbour *n)
{
neigh_hold(n);
if (e->neigh)
neigh_release(e->neigh);
e->neigh = n;
}
/*
* Set up an L2T entry and send any packets waiting in the arp queue. The
* supplied skb is used for the CPL_L2T_WRITE_REQ. Must be called with the
* entry locked.
*/
static int setup_l2e_send_pending(struct t3cdev *dev, struct sk_buff *skb,
struct l2t_entry *e)
{
struct cpl_l2t_write_req *req;
if (!skb) {
skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
if (!skb)
return -ENOMEM;
}
req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, e->idx));
req->params = htonl(V_L2T_W_IDX(e->idx) | V_L2T_W_IFF(e->smt_idx) |
V_L2T_W_VLAN(e->vlan & VLAN_VID_MASK) |
V_L2T_W_PRIO(vlan_prio(e)));
memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
skb->priority = CPL_PRIORITY_CONTROL;
cxgb3_ofld_send(dev, skb);
while (e->arpq_head) {
skb = e->arpq_head;
e->arpq_head = skb->next;
skb->next = NULL;
cxgb3_ofld_send(dev, skb);
}
e->arpq_tail = NULL;
e->state = L2T_STATE_VALID;
return 0;
}
/*
 * Add a packet to an L2T entry's queue of packets awaiting address resolution.
* Must be called with the entry's lock held.
*/
static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb)
{
skb->next = NULL;
if (e->arpq_head)
e->arpq_tail->next = skb;
else
e->arpq_head = skb;
e->arpq_tail = skb;
}
int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
struct l2t_entry *e)
{
again:
switch (e->state) {
case L2T_STATE_STALE: /* entry is stale, kick off revalidation */
neigh_event_send(e->neigh, NULL);
spin_lock_bh(&e->lock);
if (e->state == L2T_STATE_STALE)
e->state = L2T_STATE_VALID;
spin_unlock_bh(&e->lock);
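		/* fall through to the VALID case and transmit */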
case L2T_STATE_VALID: /* fast-path, send the packet on */
return cxgb3_ofld_send(dev, skb);
case L2T_STATE_RESOLVING:
spin_lock_bh(&e->lock);
if (e->state != L2T_STATE_RESOLVING) {
/* ARP already completed */
spin_unlock_bh(&e->lock);
goto again;
}
arpq_enqueue(e, skb);
spin_unlock_bh(&e->lock);
/*
* Only the first packet added to the arpq should kick off
* resolution. However, because the alloc_skb below can fail,
* we allow each packet added to the arpq to retry resolution
* as a way of recovering from transient memory exhaustion.
* A better way would be to use a work request to retry L2T
* entries when there's no memory.
*/
if (!neigh_event_send(e->neigh, NULL)) {
skb = alloc_skb(sizeof(struct cpl_l2t_write_req),
GFP_ATOMIC);
if (!skb)
break;
spin_lock_bh(&e->lock);
if (e->arpq_head)
setup_l2e_send_pending(dev, skb, e);
else /* we lost the race */
__kfree_skb(skb);
spin_unlock_bh(&e->lock);
}
}
return 0;
}
EXPORT_SYMBOL(t3_l2t_send_slow);
void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e)
{
again:
switch (e->state) {
case L2T_STATE_STALE: /* entry is stale, kick off revalidation */
neigh_event_send(e->neigh, NULL);
spin_lock_bh(&e->lock);
if (e->state == L2T_STATE_STALE) {
e->state = L2T_STATE_VALID;
}
spin_unlock_bh(&e->lock);
return;
case L2T_STATE_VALID: /* fast-path, send the packet on */
return;
case L2T_STATE_RESOLVING:
spin_lock_bh(&e->lock);
if (e->state != L2T_STATE_RESOLVING) {
/* ARP already completed */
spin_unlock_bh(&e->lock);
goto again;
}
spin_unlock_bh(&e->lock);
/*
* Only the first packet added to the arpq should kick off
* resolution. However, because the alloc_skb below can fail,
* we allow each packet added to the arpq to retry resolution
* as a way of recovering from transient memory exhaustion.
* A better way would be to use a work request to retry L2T
* entries when there's no memory.
*/
neigh_event_send(e->neigh, NULL);
}
return;
}
EXPORT_SYMBOL(t3_l2t_send_event);
/*
* Allocate a free L2T entry. Must be called with l2t_data.lock held.
*/
static struct l2t_entry *alloc_l2e(struct l2t_data *d)
{
struct l2t_entry *end, *e, **p;
if (!atomic_read(&d->nfree))
return NULL;
/* there's definitely a free entry */
for (e = d->rover, end = &d->l2tab[d->nentries]; e != end; ++e)
if (atomic_read(&e->refcnt) == 0)
goto found;
for (e = &d->l2tab[1]; atomic_read(&e->refcnt); ++e) ;
found:
d->rover = e + 1;
atomic_dec(&d->nfree);
/*
* The entry we found may be an inactive entry that is
* presently in the hash table. We need to remove it.
*/
if (e->state != L2T_STATE_UNUSED) {
int hash = arp_hash(e->addr, e->ifindex, d);
for (p = &d->l2tab[hash].first; *p; p = &(*p)->next)
if (*p == e) {
*p = e->next;
break;
}
e->state = L2T_STATE_UNUSED;
}
return e;
}
/*
* Called when an L2T entry has no more users. The entry is left in the hash
* table since it is likely to be reused but we also bump nfree to indicate
* that the entry can be reallocated for a different neighbor. We also drop
* the existing neighbor reference in case the neighbor is going away and is
* waiting on our reference.
*
* Because entries can be reallocated to other neighbors once their ref count
* drops to 0 we need to take the entry's lock to avoid races with a new
* incarnation.
*/
void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e)
{
spin_lock_bh(&e->lock);
if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */
if (e->neigh) {
neigh_release(e->neigh);
e->neigh = NULL;
}
}
spin_unlock_bh(&e->lock);
atomic_inc(&d->nfree);
}
EXPORT_SYMBOL(t3_l2e_free);
/*
* Update an L2T entry that was previously used for the same next hop as neigh.
* Must be called with softirqs disabled.
*/
static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
{
unsigned int nud_state;
spin_lock(&e->lock); /* avoid race with t3_l2t_free */
if (neigh != e->neigh)
neigh_replace(e, neigh);
nud_state = neigh->nud_state;
if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) ||
!(nud_state & NUD_VALID))
e->state = L2T_STATE_RESOLVING;
else if (nud_state & NUD_CONNECTED)
e->state = L2T_STATE_VALID;
else
e->state = L2T_STATE_STALE;
spin_unlock(&e->lock);
}
struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
struct net_device *dev)
{
struct l2t_entry *e;
struct l2t_data *d = L2DATA(cdev);
u32 addr = *(u32 *) neigh->primary_key;
int ifidx = neigh->dev->ifindex;
int hash = arp_hash(addr, ifidx, d);
struct port_info *p = netdev_priv(dev);
int smt_idx = p->port_id;
write_lock_bh(&d->lock);
for (e = d->l2tab[hash].first; e; e = e->next)
if (e->addr == addr && e->ifindex == ifidx &&
e->smt_idx == smt_idx) {
l2t_hold(d, e);
if (atomic_read(&e->refcnt) == 1)
reuse_entry(e, neigh);
goto done;
}
/* Need to allocate a new entry */
e = alloc_l2e(d);
if (e) {
spin_lock(&e->lock); /* avoid race with t3_l2t_free */
e->next = d->l2tab[hash].first;
d->l2tab[hash].first = e;
e->state = L2T_STATE_RESOLVING;
e->addr = addr;
e->ifindex = ifidx;
e->smt_idx = smt_idx;
atomic_set(&e->refcnt, 1);
neigh_replace(e, neigh);
if (neigh->dev->priv_flags & IFF_802_1Q_VLAN)
e->vlan = VLAN_DEV_INFO(neigh->dev)->vlan_id;
else
e->vlan = VLAN_NONE;
spin_unlock(&e->lock);
}
done:
write_unlock_bh(&d->lock);
return e;
}
EXPORT_SYMBOL(t3_l2t_get);
/*
* Called when address resolution fails for an L2T entry to handle packets
* on the arpq head. If a packet specifies a failure handler it is invoked,
 * otherwise the packet is sent to the offload device.
*
* XXX: maybe we should abandon the latter behavior and just require a failure
* handler.
*/
static void handle_failed_resolution(struct t3cdev *dev, struct sk_buff *arpq)
{
while (arpq) {
struct sk_buff *skb = arpq;
struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
arpq = skb->next;
skb->next = NULL;
if (cb->arp_failure_handler)
cb->arp_failure_handler(dev, skb);
else
cxgb3_ofld_send(dev, skb);
}
}
/*
* Called when the host's ARP layer makes a change to some entry that is
* loaded into the HW L2 table.
*/
void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh)
{
struct l2t_entry *e;
struct sk_buff *arpq = NULL;
struct l2t_data *d = L2DATA(dev);
u32 addr = *(u32 *) neigh->primary_key;
int ifidx = neigh->dev->ifindex;
int hash = arp_hash(addr, ifidx, d);
read_lock_bh(&d->lock);
for (e = d->l2tab[hash].first; e; e = e->next)
if (e->addr == addr && e->ifindex == ifidx) {
spin_lock(&e->lock);
goto found;
}
read_unlock_bh(&d->lock);
return;
found:
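	/* Drop only the read lock; bottom halves stay disabled until the
	 * spin_unlock_bh below, so e->lock remains bh-safe.
	 */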
read_unlock(&d->lock);
if (atomic_read(&e->refcnt)) {
if (neigh != e->neigh)
neigh_replace(e, neigh);
if (e->state == L2T_STATE_RESOLVING) {
if (neigh->nud_state & NUD_FAILED) {
arpq = e->arpq_head;
e->arpq_head = e->arpq_tail = NULL;
} else if (neigh_is_connected(neigh))
setup_l2e_send_pending(dev, NULL, e);
} else {
e->state = neigh_is_connected(neigh) ?
L2T_STATE_VALID : L2T_STATE_STALE;
if (memcmp(e->dmac, neigh->ha, 6))
setup_l2e_send_pending(dev, NULL, e);
}
}
spin_unlock_bh(&e->lock);
if (arpq)
handle_failed_resolution(dev, arpq);
}
struct l2t_data *t3_init_l2t(unsigned int l2t_capacity)
{
struct l2t_data *d;
int i, size = sizeof(*d) + l2t_capacity * sizeof(struct l2t_entry);
d = cxgb_alloc_mem(size);
if (!d)
return NULL;
d->nentries = l2t_capacity;
d->rover = &d->l2tab[1]; /* entry 0 is not used */
atomic_set(&d->nfree, l2t_capacity - 1);
rwlock_init(&d->lock);
for (i = 0; i < l2t_capacity; ++i) {
d->l2tab[i].idx = i;
d->l2tab[i].state = L2T_STATE_UNUSED;
spin_lock_init(&d->l2tab[i].lock);
atomic_set(&d->l2tab[i].refcnt, 0);
}
return d;
}
void t3_free_l2t(struct l2t_data *d)
{
cxgb_free_mem(d);
}

drivers/net/cxgb3/l2t.h Normal file

@@ -0,0 +1,143 @@
/*
* Copyright (c) 2006 Chelsio, Inc. All rights reserved.
* Copyright (c) 2006 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _CHELSIO_L2T_H
#define _CHELSIO_L2T_H
#include <linux/spinlock.h>
#include "t3cdev.h"
#include <asm/atomic.h>
enum {
L2T_STATE_VALID, /* entry is up to date */
L2T_STATE_STALE, /* entry may be used but needs revalidation */
L2T_STATE_RESOLVING, /* entry needs address resolution */
L2T_STATE_UNUSED /* entry not in use */
};
struct neighbour;
struct sk_buff;
/*
* Each L2T entry plays multiple roles. First of all, it keeps state for the
* corresponding entry of the HW L2 table and maintains a queue of offload
* packets awaiting address resolution. Second, it is a node of a hash table
* chain, where the nodes of the chain are linked together through their next
* pointer. Finally, each node is a bucket of a hash table, pointing to the
* first element in its chain through its first pointer.
*/
struct l2t_entry {
u16 state; /* entry state */
u16 idx; /* entry index */
u32 addr; /* dest IP address */
int ifindex; /* neighbor's net_device's ifindex */
u16 smt_idx; /* SMT index */
u16 vlan; /* VLAN TCI (id: bits 0-11, prio: bits 13-15) */
struct neighbour *neigh; /* associated neighbour */
struct l2t_entry *first; /* start of hash chain */
struct l2t_entry *next; /* next l2t_entry on chain */
struct sk_buff *arpq_head; /* queue of packets awaiting resolution */
struct sk_buff *arpq_tail;
spinlock_t lock;
atomic_t refcnt; /* entry reference count */
u8 dmac[6]; /* neighbour's MAC address */
};
struct l2t_data {
unsigned int nentries; /* number of entries */
struct l2t_entry *rover; /* starting point for next allocation */
atomic_t nfree; /* number of free entries */
rwlock_t lock;
struct l2t_entry l2tab[0];
};
typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
struct sk_buff * skb);
/*
* Callback stored in an skb to handle address resolution failure.
*/
struct l2t_skb_cb {
arp_failure_handler_func arp_failure_handler;
};
#define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
static inline void set_arp_failure_handler(struct sk_buff *skb,
arp_failure_handler_func hnd)
{
L2T_SKB_CB(skb)->arp_failure_handler = hnd;
}
/*
* Getting to the L2 data from an offload device.
*/
#define L2DATA(dev) ((dev)->l2opt)
#define W_TCB_L2T_IX 0
#define S_TCB_L2T_IX 7
#define M_TCB_L2T_IX 0x7ffULL
#define V_TCB_L2T_IX(x) ((x) << S_TCB_L2T_IX)
void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e);
void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh);
struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
struct net_device *dev);
int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
struct l2t_entry *e);
void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e);
struct l2t_data *t3_init_l2t(unsigned int l2t_capacity);
void t3_free_l2t(struct l2t_data *d);
int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb);
static inline int l2t_send(struct t3cdev *dev, struct sk_buff *skb,
struct l2t_entry *e)
{
if (likely(e->state == L2T_STATE_VALID))
return cxgb3_ofld_send(dev, skb);
return t3_l2t_send_slow(dev, skb, e);
}
static inline void l2t_release(struct l2t_data *d, struct l2t_entry *e)
{
if (atomic_dec_and_test(&e->refcnt))
t3_l2e_free(d, e);
}
static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
{
if (atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */
atomic_dec(&d->nfree);
}
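/*
 * Usage sketch (illustration only; cdev, neigh, ndev and skb stand for
 * caller-supplied objects) of a typical offload transmit path:
 *
 *	struct l2t_entry *e = t3_l2t_get(cdev, neigh, ndev);
 *	if (!e)
 *		return -ENOMEM;
 *	err = l2t_send(cdev, skb, e);	 queues on the arpq if unresolved
 *	...
 *	l2t_release(L2DATA(cdev), e);	 drop the reference when done
 */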
#endif

drivers/net/cxgb3/mc5.c Normal file

@@ -0,0 +1,453 @@
/*
* This file is part of the Chelsio T3 Ethernet driver.
*
* Copyright (C) 2003-2006 Chelsio Communications. All rights reserved.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
* release for licensing terms and conditions.
*/
#include "common.h"
#include "regs.h"
enum {
IDT75P52100 = 4,
IDT75N43102 = 5
};
/* DBGI command mode */
enum {
DBGI_MODE_MBUS = 0,
DBGI_MODE_IDT52100 = 5
};
/* IDT 75P52100 commands */
#define IDT_CMD_READ 0
#define IDT_CMD_WRITE 1
#define IDT_CMD_SEARCH 2
#define IDT_CMD_LEARN 3
/* IDT LAR register address and value for 144-bit mode (low 32 bits) */
#define IDT_LAR_ADR0 0x180006
#define IDT_LAR_MODE144 0xffff0000
/* IDT SCR and SSR addresses (low 32 bits) */
#define IDT_SCR_ADR0 0x180000
#define IDT_SSR0_ADR0 0x180002
#define IDT_SSR1_ADR0 0x180004
/* IDT GMR base address (low 32 bits) */
#define IDT_GMR_BASE_ADR0 0x180020
/* IDT data and mask array base addresses (low 32 bits) */
#define IDT_DATARY_BASE_ADR0 0
#define IDT_MSKARY_BASE_ADR0 0x80000
/* IDT 75N43102 commands */
#define IDT4_CMD_SEARCH144 3
#define IDT4_CMD_WRITE 4
#define IDT4_CMD_READ 5
/* IDT 75N43102 SCR address (low 32 bits) */
#define IDT4_SCR_ADR0 0x3
/* IDT 75N43102 GMR base addresses (low 32 bits) */
#define IDT4_GMR_BASE0 0x10
#define IDT4_GMR_BASE1 0x20
#define IDT4_GMR_BASE2 0x30
/* IDT 75N43102 data and mask array base addresses (low 32 bits) */
#define IDT4_DATARY_BASE_ADR0 0x1000000
#define IDT4_MSKARY_BASE_ADR0 0x2000000
#define MAX_WRITE_ATTEMPTS 5
#define MAX_ROUTES 2048
/*
* Issue a command to the TCAM and wait for its completion. The address and
* any data required by the command must have been setup by the caller.
*/
static int mc5_cmd_write(struct adapter *adapter, u32 cmd)
{
t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_CMD, cmd);
return t3_wait_op_done(adapter, A_MC5_DB_DBGI_RSP_STATUS,
F_DBGIRSPVALID, 1, MAX_WRITE_ATTEMPTS, 1);
}
static inline void dbgi_wr_addr3(struct adapter *adapter, u32 v1, u32 v2,
u32 v3)
{
t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR0, v1);
t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR1, v2);
t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR2, v3);
}
static inline void dbgi_wr_data3(struct adapter *adapter, u32 v1, u32 v2,
u32 v3)
{
t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA0, v1);
t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA1, v2);
t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_DATA2, v3);
}
static inline void dbgi_rd_rsp3(struct adapter *adapter, u32 *v1, u32 *v2,
u32 *v3)
{
*v1 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA0);
*v2 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA1);
*v3 = t3_read_reg(adapter, A_MC5_DB_DBGI_RSP_DATA2);
}
/*
* Write data to the TCAM register at address (0, 0, addr_lo) using the TCAM
* command cmd. The data to be written must have been set up by the caller.
* Returns -1 on failure, 0 on success.
*/
static int mc5_write(struct adapter *adapter, u32 addr_lo, u32 cmd)
{
t3_write_reg(adapter, A_MC5_DB_DBGI_REQ_ADDR0, addr_lo);
if (mc5_cmd_write(adapter, cmd) == 0)
return 0;
CH_ERR(adapter, "MC5 timeout writing to TCAM address 0x%x\n",
addr_lo);
return -1;
}
static int init_mask_data_array(struct mc5 *mc5, u32 mask_array_base,
u32 data_array_base, u32 write_cmd,
int addr_shift)
{
unsigned int i;
struct adapter *adap = mc5->adapter;
/*
* We need the size of the TCAM data and mask arrays in terms of
* 72-bit entries.
*/
unsigned int size72 = mc5->tcam_size;
unsigned int server_base = t3_read_reg(adap, A_MC5_DB_SERVER_INDEX);
if (mc5->mode == MC5_MODE_144_BIT) {
size72 *= 2; /* 1 144-bit entry is 2 72-bit entries */
server_base *= 2;
}
/* Clear the data array */
dbgi_wr_data3(adap, 0, 0, 0);
for (i = 0; i < size72; i++)
if (mc5_write(adap, data_array_base + (i << addr_shift),
write_cmd))
return -1;
/* Initialize the mask array. */
dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);
for (i = 0; i < size72; i++) {
if (i == server_base) /* entering server or routing region */
t3_write_reg(adap, A_MC5_DB_DBGI_REQ_DATA0,
mc5->mode == MC5_MODE_144_BIT ?
0xfffffff9 : 0xfffffffd);
if (mc5_write(adap, mask_array_base + (i << addr_shift),
write_cmd))
return -1;
}
return 0;
}
static int init_idt52100(struct mc5 *mc5)
{
int i;
struct adapter *adap = mc5->adapter;
t3_write_reg(adap, A_MC5_DB_RSP_LATENCY,
V_RDLAT(0x15) | V_LRNLAT(0x15) | V_SRCHLAT(0x15));
t3_write_reg(adap, A_MC5_DB_PART_ID_INDEX, 2);
/*
* Use GMRs 14-15 for ELOOKUP, GMRs 12-13 for SYN lookups, and
* GMRs 8-9 for ACK- and AOPEN searches.
*/
t3_write_reg(adap, A_MC5_DB_POPEN_DATA_WR_CMD, IDT_CMD_WRITE);
t3_write_reg(adap, A_MC5_DB_POPEN_MASK_WR_CMD, IDT_CMD_WRITE);
t3_write_reg(adap, A_MC5_DB_AOPEN_SRCH_CMD, IDT_CMD_SEARCH);
t3_write_reg(adap, A_MC5_DB_AOPEN_LRN_CMD, IDT_CMD_LEARN);
t3_write_reg(adap, A_MC5_DB_SYN_SRCH_CMD, IDT_CMD_SEARCH | 0x6000);
t3_write_reg(adap, A_MC5_DB_SYN_LRN_CMD, IDT_CMD_LEARN);
t3_write_reg(adap, A_MC5_DB_ACK_SRCH_CMD, IDT_CMD_SEARCH);
t3_write_reg(adap, A_MC5_DB_ACK_LRN_CMD, IDT_CMD_LEARN);
t3_write_reg(adap, A_MC5_DB_ILOOKUP_CMD, IDT_CMD_SEARCH);
t3_write_reg(adap, A_MC5_DB_ELOOKUP_CMD, IDT_CMD_SEARCH | 0x7000);
t3_write_reg(adap, A_MC5_DB_DATA_WRITE_CMD, IDT_CMD_WRITE);
t3_write_reg(adap, A_MC5_DB_DATA_READ_CMD, IDT_CMD_READ);
/* Set DBGI command mode for IDT TCAM. */
t3_write_reg(adap, A_MC5_DB_DBGI_CONFIG, DBGI_MODE_IDT52100);
/* Set up LAR */
dbgi_wr_data3(adap, IDT_LAR_MODE144, 0, 0);
if (mc5_write(adap, IDT_LAR_ADR0, IDT_CMD_WRITE))
goto err;
/* Set up SSRs */
dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0);
if (mc5_write(adap, IDT_SSR0_ADR0, IDT_CMD_WRITE) ||
mc5_write(adap, IDT_SSR1_ADR0, IDT_CMD_WRITE))
goto err;
/* Set up GMRs */
for (i = 0; i < 32; ++i) {
if (i >= 12 && i < 15)
dbgi_wr_data3(adap, 0xfffffff9, 0xffffffff, 0xff);
else if (i == 15)
dbgi_wr_data3(adap, 0xfffffff9, 0xffff8007, 0xff);
else
dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);
if (mc5_write(adap, IDT_GMR_BASE_ADR0 + i, IDT_CMD_WRITE))
goto err;
}
/* Set up SCR */
dbgi_wr_data3(adap, 1, 0, 0);
if (mc5_write(adap, IDT_SCR_ADR0, IDT_CMD_WRITE))
goto err;
return init_mask_data_array(mc5, IDT_MSKARY_BASE_ADR0,
IDT_DATARY_BASE_ADR0, IDT_CMD_WRITE, 0);
err:
return -EIO;
}
static int init_idt43102(struct mc5 *mc5)
{
int i;
struct adapter *adap = mc5->adapter;
t3_write_reg(adap, A_MC5_DB_RSP_LATENCY,
adap->params.rev == 0 ? V_RDLAT(0xd) | V_SRCHLAT(0x11) :
V_RDLAT(0xd) | V_SRCHLAT(0x12));
/*
* Use GMRs 24-25 for ELOOKUP, GMRs 20-21 for SYN lookups, and no mask
* for ACK- and AOPEN searches.
*/
t3_write_reg(adap, A_MC5_DB_POPEN_DATA_WR_CMD, IDT4_CMD_WRITE);
t3_write_reg(adap, A_MC5_DB_POPEN_MASK_WR_CMD, IDT4_CMD_WRITE);
t3_write_reg(adap, A_MC5_DB_AOPEN_SRCH_CMD,
IDT4_CMD_SEARCH144 | 0x3800);
t3_write_reg(adap, A_MC5_DB_SYN_SRCH_CMD, IDT4_CMD_SEARCH144);
t3_write_reg(adap, A_MC5_DB_ACK_SRCH_CMD, IDT4_CMD_SEARCH144 | 0x3800);
t3_write_reg(adap, A_MC5_DB_ILOOKUP_CMD, IDT4_CMD_SEARCH144 | 0x3800);
t3_write_reg(adap, A_MC5_DB_ELOOKUP_CMD, IDT4_CMD_SEARCH144 | 0x800);
t3_write_reg(adap, A_MC5_DB_DATA_WRITE_CMD, IDT4_CMD_WRITE);
t3_write_reg(adap, A_MC5_DB_DATA_READ_CMD, IDT4_CMD_READ);
t3_write_reg(adap, A_MC5_DB_PART_ID_INDEX, 3);
/* Set DBGI command mode for IDT TCAM. */
t3_write_reg(adap, A_MC5_DB_DBGI_CONFIG, DBGI_MODE_IDT52100);
/* Set up GMRs */
dbgi_wr_data3(adap, 0xffffffff, 0xffffffff, 0xff);
for (i = 0; i < 7; ++i)
if (mc5_write(adap, IDT4_GMR_BASE0 + i, IDT4_CMD_WRITE))
goto err;
for (i = 0; i < 4; ++i)
if (mc5_write(adap, IDT4_GMR_BASE2 + i, IDT4_CMD_WRITE))
goto err;
dbgi_wr_data3(adap, 0xfffffff9, 0xffffffff, 0xff);
if (mc5_write(adap, IDT4_GMR_BASE1, IDT4_CMD_WRITE) ||
mc5_write(adap, IDT4_GMR_BASE1 + 1, IDT4_CMD_WRITE) ||
mc5_write(adap, IDT4_GMR_BASE1 + 4, IDT4_CMD_WRITE))
goto err;
dbgi_wr_data3(adap, 0xfffffff9, 0xffff8007, 0xff);
if (mc5_write(adap, IDT4_GMR_BASE1 + 5, IDT4_CMD_WRITE))
goto err;
/* Set up SCR */
dbgi_wr_data3(adap, 0xf0000000, 0, 0);
if (mc5_write(adap, IDT4_SCR_ADR0, IDT4_CMD_WRITE))
goto err;
return init_mask_data_array(mc5, IDT4_MSKARY_BASE_ADR0,
IDT4_DATARY_BASE_ADR0, IDT4_CMD_WRITE, 1);
err:
return -EIO;
}
/* Put MC5 in DBGI mode. */
static inline void mc5_dbgi_mode_enable(const struct mc5 *mc5)
{
t3_write_reg(mc5->adapter, A_MC5_DB_CONFIG,
V_TMMODE(mc5->mode == MC5_MODE_72_BIT) | F_DBGIEN);
}
/* Put MC5 in M-Bus mode. */
static void mc5_dbgi_mode_disable(const struct mc5 *mc5)
{
t3_write_reg(mc5->adapter, A_MC5_DB_CONFIG,
V_TMMODE(mc5->mode == MC5_MODE_72_BIT) |
V_COMPEN(mc5->mode == MC5_MODE_72_BIT) |
V_PRTYEN(mc5->parity_enabled) | F_MBUSEN);
}
/*
* Initialization that requires the OS and protocol layers to already
 * be initialized goes here.
*/
int t3_mc5_init(struct mc5 *mc5, unsigned int nservers, unsigned int nfilters,
unsigned int nroutes)
{
u32 cfg;
int err;
unsigned int tcam_size = mc5->tcam_size;
struct adapter *adap = mc5->adapter;
if (nroutes > MAX_ROUTES || nroutes + nservers + nfilters > tcam_size)
return -EINVAL;
/* Reset the TCAM */
cfg = t3_read_reg(adap, A_MC5_DB_CONFIG) & ~F_TMMODE;
cfg |= V_TMMODE(mc5->mode == MC5_MODE_72_BIT) | F_TMRST;
t3_write_reg(adap, A_MC5_DB_CONFIG, cfg);
if (t3_wait_op_done(adap, A_MC5_DB_CONFIG, F_TMRDY, 1, 500, 0)) {
CH_ERR(adap, "TCAM reset timed out\n");
return -1;
}
t3_write_reg(adap, A_MC5_DB_ROUTING_TABLE_INDEX, tcam_size - nroutes);
t3_write_reg(adap, A_MC5_DB_FILTER_TABLE,
tcam_size - nroutes - nfilters);
t3_write_reg(adap, A_MC5_DB_SERVER_INDEX,
tcam_size - nroutes - nfilters - nservers);
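	/*
	 * Resulting TCAM layout, from low to high index: active-connection
	 * region, then nservers server entries, nfilters filter entries,
	 * and nroutes routing entries at the top.
	 */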
mc5->parity_enabled = 1;
	/* All the TCAM addresses we access have only the low 32 bits non-zero */
t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR1, 0);
t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR2, 0);
mc5_dbgi_mode_enable(mc5);
switch (mc5->part_type) {
case IDT75P52100:
err = init_idt52100(mc5);
break;
case IDT75N43102:
err = init_idt43102(mc5);
break;
default:
CH_ERR(adap, "Unsupported TCAM type %d\n", mc5->part_type);
err = -EINVAL;
break;
}
mc5_dbgi_mode_disable(mc5);
return err;
}
/**
 * t3_read_mc5_range - dump a part of the memory managed by MC5
* @mc5: the MC5 handle
* @start: the start address for the dump
* @n: number of 72-bit words to read
* @buf: result buffer
*
* Read n 72-bit words from MC5 memory from the given start location.
*/
int t3_read_mc5_range(const struct mc5 *mc5, unsigned int start,
unsigned int n, u32 *buf)
{
u32 read_cmd;
int err = 0;
struct adapter *adap = mc5->adapter;
if (mc5->part_type == IDT75P52100)
read_cmd = IDT_CMD_READ;
else if (mc5->part_type == IDT75N43102)
read_cmd = IDT4_CMD_READ;
else
return -EINVAL;
mc5_dbgi_mode_enable(mc5);
while (n--) {
t3_write_reg(adap, A_MC5_DB_DBGI_REQ_ADDR0, start++);
if (mc5_cmd_write(adap, read_cmd)) {
err = -EIO;
break;
}
dbgi_rd_rsp3(adap, buf + 2, buf + 1, buf);
buf += 3;
}
mc5_dbgi_mode_disable(mc5);
	return err;
}
#define MC5_INT_FATAL (F_PARITYERR | F_REQQPARERR | F_DISPQPARERR)
/*
* MC5 interrupt handler
*/
void t3_mc5_intr_handler(struct mc5 *mc5)
{
struct adapter *adap = mc5->adapter;
u32 cause = t3_read_reg(adap, A_MC5_DB_INT_CAUSE);
if ((cause & F_PARITYERR) && mc5->parity_enabled) {
CH_ALERT(adap, "MC5 parity error\n");
mc5->stats.parity_err++;
}
if (cause & F_REQQPARERR) {
CH_ALERT(adap, "MC5 request queue parity error\n");
mc5->stats.reqq_parity_err++;
}
if (cause & F_DISPQPARERR) {
CH_ALERT(adap, "MC5 dispatch queue parity error\n");
mc5->stats.dispq_parity_err++;
}
if (cause & F_ACTRGNFULL)
mc5->stats.active_rgn_full++;
if (cause & F_NFASRCHFAIL)
mc5->stats.nfa_srch_err++;
if (cause & F_UNKNOWNCMD)
mc5->stats.unknown_cmd++;
if (cause & F_DELACTEMPTY)
mc5->stats.del_act_empty++;
if (cause & MC5_INT_FATAL)
t3_fatal_err(adap);
t3_write_reg(adap, A_MC5_DB_INT_CAUSE, cause);
}
void __devinit t3_mc5_prep(struct adapter *adapter, struct mc5 *mc5, int mode)
{
#define K * 1024
static unsigned int tcam_part_size[] = { /* in K 72-bit entries */
64 K, 128 K, 256 K, 32 K
};
#undef K
u32 cfg = t3_read_reg(adapter, A_MC5_DB_CONFIG);
mc5->adapter = adapter;
mc5->mode = (unsigned char)mode;
mc5->part_type = (unsigned char)G_TMTYPE(cfg);
if (cfg & F_TMTYPEHI)
mc5->part_type |= 4;
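	/*
	 * For example, a part-size code of 0 selects 64K 72-bit entries;
	 * 144-bit mode halves that, as each entry spans two 72-bit words.
	 */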
mc5->tcam_size = tcam_part_size[G_TMPARTSIZE(cfg)];
if (mode == MC5_MODE_144_BIT)
mc5->tcam_size /= 2;
}

drivers/net/cxgb3/regs.h Normal file

File diff suppressed because it is too large

drivers/net/cxgb3/sge.c Normal file

File diff suppressed because it is too large

drivers/net/cxgb3/sge_defs.h Normal file

@@ -0,0 +1,251 @@
/*
* This file is automatically generated --- any changes will be lost.
*/
#ifndef _SGE_DEFS_H
#define _SGE_DEFS_H
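/*
 * Field-macro convention used throughout this generated file: S_x is the
 * bit offset of field x, M_x its width mask, V_x(v) shifts a value into
 * the field, G_x(w) extracts it, and F_x is the set form of a single-bit
 * field. For example, G_EC_INDEX(V_EC_INDEX(5)) == 5.
 */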
#define S_EC_CREDITS 0
#define M_EC_CREDITS 0x7FFF
#define V_EC_CREDITS(x) ((x) << S_EC_CREDITS)
#define G_EC_CREDITS(x) (((x) >> S_EC_CREDITS) & M_EC_CREDITS)
#define S_EC_GTS 15
#define V_EC_GTS(x) ((x) << S_EC_GTS)
#define F_EC_GTS V_EC_GTS(1U)
#define S_EC_INDEX 16
#define M_EC_INDEX 0xFFFF
#define V_EC_INDEX(x) ((x) << S_EC_INDEX)
#define G_EC_INDEX(x) (((x) >> S_EC_INDEX) & M_EC_INDEX)
#define S_EC_SIZE 0
#define M_EC_SIZE 0xFFFF
#define V_EC_SIZE(x) ((x) << S_EC_SIZE)
#define G_EC_SIZE(x) (((x) >> S_EC_SIZE) & M_EC_SIZE)
#define S_EC_BASE_LO 16
#define M_EC_BASE_LO 0xFFFF
#define V_EC_BASE_LO(x) ((x) << S_EC_BASE_LO)
#define G_EC_BASE_LO(x) (((x) >> S_EC_BASE_LO) & M_EC_BASE_LO)
#define S_EC_BASE_HI 0
#define M_EC_BASE_HI 0xF
#define V_EC_BASE_HI(x) ((x) << S_EC_BASE_HI)
#define G_EC_BASE_HI(x) (((x) >> S_EC_BASE_HI) & M_EC_BASE_HI)
#define S_EC_RESPQ 4
#define M_EC_RESPQ 0x7
#define V_EC_RESPQ(x) ((x) << S_EC_RESPQ)
#define G_EC_RESPQ(x) (((x) >> S_EC_RESPQ) & M_EC_RESPQ)
#define S_EC_TYPE 7
#define M_EC_TYPE 0x7
#define V_EC_TYPE(x) ((x) << S_EC_TYPE)
#define G_EC_TYPE(x) (((x) >> S_EC_TYPE) & M_EC_TYPE)
#define S_EC_GEN 10
#define V_EC_GEN(x) ((x) << S_EC_GEN)
#define F_EC_GEN V_EC_GEN(1U)
#define S_EC_UP_TOKEN 11
#define M_EC_UP_TOKEN 0xFFFFF
#define V_EC_UP_TOKEN(x) ((x) << S_EC_UP_TOKEN)
#define G_EC_UP_TOKEN(x) (((x) >> S_EC_UP_TOKEN) & M_EC_UP_TOKEN)
#define S_EC_VALID 31
#define V_EC_VALID(x) ((x) << S_EC_VALID)
#define F_EC_VALID V_EC_VALID(1U)
#define S_RQ_MSI_VEC 20
#define M_RQ_MSI_VEC 0x3F
#define V_RQ_MSI_VEC(x) ((x) << S_RQ_MSI_VEC)
#define G_RQ_MSI_VEC(x) (((x) >> S_RQ_MSI_VEC) & M_RQ_MSI_VEC)
#define S_RQ_INTR_EN 26
#define V_RQ_INTR_EN(x) ((x) << S_RQ_INTR_EN)
#define F_RQ_INTR_EN V_RQ_INTR_EN(1U)
#define S_RQ_GEN 28
#define V_RQ_GEN(x) ((x) << S_RQ_GEN)
#define F_RQ_GEN V_RQ_GEN(1U)
#define S_CQ_INDEX 0
#define M_CQ_INDEX 0xFFFF
#define V_CQ_INDEX(x) ((x) << S_CQ_INDEX)
#define G_CQ_INDEX(x) (((x) >> S_CQ_INDEX) & M_CQ_INDEX)
#define S_CQ_SIZE 16
#define M_CQ_SIZE 0xFFFF
#define V_CQ_SIZE(x) ((x) << S_CQ_SIZE)
#define G_CQ_SIZE(x) (((x) >> S_CQ_SIZE) & M_CQ_SIZE)
#define S_CQ_BASE_HI 0
#define M_CQ_BASE_HI 0xFFFFF
#define V_CQ_BASE_HI(x) ((x) << S_CQ_BASE_HI)
#define G_CQ_BASE_HI(x) (((x) >> S_CQ_BASE_HI) & M_CQ_BASE_HI)
#define S_CQ_RSPQ 20
#define M_CQ_RSPQ 0x3F
#define V_CQ_RSPQ(x) ((x) << S_CQ_RSPQ)
#define G_CQ_RSPQ(x) (((x) >> S_CQ_RSPQ) & M_CQ_RSPQ)
#define S_CQ_ASYNC_NOTIF 26
#define V_CQ_ASYNC_NOTIF(x) ((x) << S_CQ_ASYNC_NOTIF)
#define F_CQ_ASYNC_NOTIF V_CQ_ASYNC_NOTIF(1U)
#define S_CQ_ARMED 27
#define V_CQ_ARMED(x) ((x) << S_CQ_ARMED)
#define F_CQ_ARMED V_CQ_ARMED(1U)
#define S_CQ_ASYNC_NOTIF_SOL 28
#define V_CQ_ASYNC_NOTIF_SOL(x) ((x) << S_CQ_ASYNC_NOTIF_SOL)
#define F_CQ_ASYNC_NOTIF_SOL V_CQ_ASYNC_NOTIF_SOL(1U)
#define S_CQ_GEN 29
#define V_CQ_GEN(x) ((x) << S_CQ_GEN)
#define F_CQ_GEN V_CQ_GEN(1U)
#define S_CQ_OVERFLOW_MODE 31
#define V_CQ_OVERFLOW_MODE(x) ((x) << S_CQ_OVERFLOW_MODE)
#define F_CQ_OVERFLOW_MODE V_CQ_OVERFLOW_MODE(1U)
#define S_CQ_CREDITS 0
#define M_CQ_CREDITS 0xFFFF
#define V_CQ_CREDITS(x) ((x) << S_CQ_CREDITS)
#define G_CQ_CREDITS(x) (((x) >> S_CQ_CREDITS) & M_CQ_CREDITS)
#define S_CQ_CREDIT_THRES 16
#define M_CQ_CREDIT_THRES 0x1FFF
#define V_CQ_CREDIT_THRES(x) ((x) << S_CQ_CREDIT_THRES)
#define G_CQ_CREDIT_THRES(x) (((x) >> S_CQ_CREDIT_THRES) & M_CQ_CREDIT_THRES)
#define S_FL_BASE_HI 0
#define M_FL_BASE_HI 0xFFFFF
#define V_FL_BASE_HI(x) ((x) << S_FL_BASE_HI)
#define G_FL_BASE_HI(x) (((x) >> S_FL_BASE_HI) & M_FL_BASE_HI)
#define S_FL_INDEX_LO 20
#define M_FL_INDEX_LO 0xFFF
#define V_FL_INDEX_LO(x) ((x) << S_FL_INDEX_LO)
#define G_FL_INDEX_LO(x) (((x) >> S_FL_INDEX_LO) & M_FL_INDEX_LO)
#define S_FL_INDEX_HI 0
#define M_FL_INDEX_HI 0xF
#define V_FL_INDEX_HI(x) ((x) << S_FL_INDEX_HI)
#define G_FL_INDEX_HI(x) (((x) >> S_FL_INDEX_HI) & M_FL_INDEX_HI)
#define S_FL_SIZE 4
#define M_FL_SIZE 0xFFFF
#define V_FL_SIZE(x) ((x) << S_FL_SIZE)
#define G_FL_SIZE(x) (((x) >> S_FL_SIZE) & M_FL_SIZE)
#define S_FL_GEN 20
#define V_FL_GEN(x) ((x) << S_FL_GEN)
#define F_FL_GEN V_FL_GEN(1U)
#define S_FL_ENTRY_SIZE_LO 21
#define M_FL_ENTRY_SIZE_LO 0x7FF
#define V_FL_ENTRY_SIZE_LO(x) ((x) << S_FL_ENTRY_SIZE_LO)
#define G_FL_ENTRY_SIZE_LO(x) (((x) >> S_FL_ENTRY_SIZE_LO) & M_FL_ENTRY_SIZE_LO)
#define S_FL_ENTRY_SIZE_HI 0
#define M_FL_ENTRY_SIZE_HI 0x1FFFFF
#define V_FL_ENTRY_SIZE_HI(x) ((x) << S_FL_ENTRY_SIZE_HI)
#define G_FL_ENTRY_SIZE_HI(x) (((x) >> S_FL_ENTRY_SIZE_HI) & M_FL_ENTRY_SIZE_HI)
#define S_FL_CONG_THRES 21
#define M_FL_CONG_THRES 0x3FF
#define V_FL_CONG_THRES(x) ((x) << S_FL_CONG_THRES)
#define G_FL_CONG_THRES(x) (((x) >> S_FL_CONG_THRES) & M_FL_CONG_THRES)
#define S_FL_GTS 31
#define V_FL_GTS(x) ((x) << S_FL_GTS)
#define F_FL_GTS V_FL_GTS(1U)
#define S_FLD_GEN1 31
#define V_FLD_GEN1(x) ((x) << S_FLD_GEN1)
#define F_FLD_GEN1 V_FLD_GEN1(1U)
#define S_FLD_GEN2 0
#define V_FLD_GEN2(x) ((x) << S_FLD_GEN2)
#define F_FLD_GEN2 V_FLD_GEN2(1U)
#define S_RSPD_TXQ1_CR 0
#define M_RSPD_TXQ1_CR 0x7F
#define V_RSPD_TXQ1_CR(x) ((x) << S_RSPD_TXQ1_CR)
#define G_RSPD_TXQ1_CR(x) (((x) >> S_RSPD_TXQ1_CR) & M_RSPD_TXQ1_CR)
#define S_RSPD_TXQ1_GTS 7
#define V_RSPD_TXQ1_GTS(x) ((x) << S_RSPD_TXQ1_GTS)
#define F_RSPD_TXQ1_GTS V_RSPD_TXQ1_GTS(1U)
#define S_RSPD_TXQ2_CR 8
#define M_RSPD_TXQ2_CR 0x7F
#define V_RSPD_TXQ2_CR(x) ((x) << S_RSPD_TXQ2_CR)
#define G_RSPD_TXQ2_CR(x) (((x) >> S_RSPD_TXQ2_CR) & M_RSPD_TXQ2_CR)
#define S_RSPD_TXQ2_GTS 15
#define V_RSPD_TXQ2_GTS(x) ((x) << S_RSPD_TXQ2_GTS)
#define F_RSPD_TXQ2_GTS V_RSPD_TXQ2_GTS(1U)
#define S_RSPD_TXQ0_CR 16
#define M_RSPD_TXQ0_CR 0x7F
#define V_RSPD_TXQ0_CR(x) ((x) << S_RSPD_TXQ0_CR)
#define G_RSPD_TXQ0_CR(x) (((x) >> S_RSPD_TXQ0_CR) & M_RSPD_TXQ0_CR)
#define S_RSPD_TXQ0_GTS 23
#define V_RSPD_TXQ0_GTS(x) ((x) << S_RSPD_TXQ0_GTS)
#define F_RSPD_TXQ0_GTS V_RSPD_TXQ0_GTS(1U)
#define S_RSPD_EOP 24
#define V_RSPD_EOP(x) ((x) << S_RSPD_EOP)
#define F_RSPD_EOP V_RSPD_EOP(1U)
#define S_RSPD_SOP 25
#define V_RSPD_SOP(x) ((x) << S_RSPD_SOP)
#define F_RSPD_SOP V_RSPD_SOP(1U)
#define S_RSPD_ASYNC_NOTIF 26
#define V_RSPD_ASYNC_NOTIF(x) ((x) << S_RSPD_ASYNC_NOTIF)
#define F_RSPD_ASYNC_NOTIF V_RSPD_ASYNC_NOTIF(1U)
#define S_RSPD_FL0_GTS 27
#define V_RSPD_FL0_GTS(x) ((x) << S_RSPD_FL0_GTS)
#define F_RSPD_FL0_GTS V_RSPD_FL0_GTS(1U)
#define S_RSPD_FL1_GTS 28
#define V_RSPD_FL1_GTS(x) ((x) << S_RSPD_FL1_GTS)
#define F_RSPD_FL1_GTS V_RSPD_FL1_GTS(1U)
#define S_RSPD_IMM_DATA_VALID 29
#define V_RSPD_IMM_DATA_VALID(x) ((x) << S_RSPD_IMM_DATA_VALID)
#define F_RSPD_IMM_DATA_VALID V_RSPD_IMM_DATA_VALID(1U)
#define S_RSPD_OFFLOAD 30
#define V_RSPD_OFFLOAD(x) ((x) << S_RSPD_OFFLOAD)
#define F_RSPD_OFFLOAD V_RSPD_OFFLOAD(1U)
#define S_RSPD_GEN1 31
#define V_RSPD_GEN1(x) ((x) << S_RSPD_GEN1)
#define F_RSPD_GEN1 V_RSPD_GEN1(1U)
#define S_RSPD_LEN 0
#define M_RSPD_LEN 0x7FFFFFFF
#define V_RSPD_LEN(x) ((x) << S_RSPD_LEN)
#define G_RSPD_LEN(x) (((x) >> S_RSPD_LEN) & M_RSPD_LEN)
#define S_RSPD_FLQ 31
#define V_RSPD_FLQ(x) ((x) << S_RSPD_FLQ)
#define F_RSPD_FLQ V_RSPD_FLQ(1U)
#define S_RSPD_GEN2 0
#define V_RSPD_GEN2(x) ((x) << S_RSPD_GEN2)
#define F_RSPD_GEN2 V_RSPD_GEN2(1U)
#define S_RSPD_INR_VEC 1
#define M_RSPD_INR_VEC 0x7F
#define V_RSPD_INR_VEC(x) ((x) << S_RSPD_INR_VEC)
#define G_RSPD_INR_VEC(x) (((x) >> S_RSPD_INR_VEC) & M_RSPD_INR_VEC)
#endif /* _SGE_DEFS_H */

drivers/net/cxgb3/t3_cpl.h Normal file

File diff suppressed because it is too large

drivers/net/cxgb3/t3_hw.c Normal file

File diff suppressed because it is too large

drivers/net/cxgb3/t3cdev.h Normal file

@@ -0,0 +1,72 @@
/*
* Copyright (C) 2003-2006 Chelsio Communications. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef _T3CDEV_H_
#define _T3CDEV_H_
#include <linux/list.h>
#include <asm/atomic.h>
#include <asm/semaphore.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <net/neighbour.h>
#define T3CNAMSIZ 16
/* Get the t3cdev associated with a net_device */
#define T3CDEV(netdev) (struct t3cdev *)(netdev->priv)
struct cxgb3_client;
enum t3ctype {
T3A = 0,
T3B
};
struct t3cdev {
char name[T3CNAMSIZ]; /* T3C device name */
enum t3ctype type;
struct list_head ofld_dev_list; /* for list linking */
struct net_device *lldev; /* LL dev associated with T3C messages */
struct proc_dir_entry *proc_dir; /* root of proc dir for this T3C */
int (*send)(struct t3cdev *dev, struct sk_buff *skb);
int (*recv)(struct t3cdev *dev, struct sk_buff **skb, int n);
int (*ctl)(struct t3cdev *dev, unsigned int req, void *data);
void (*neigh_update)(struct t3cdev *dev, struct neighbour *neigh);
void *priv; /* driver private data */
void *l2opt; /* optional layer 2 data */
void *l3opt; /* optional layer 3 data */
void *l4opt; /* optional layer 4 data */
void *ulp; /* ulp stuff */
};
#endif /* _T3CDEV_H_ */

drivers/net/cxgb3/version.h Normal file

@@ -0,0 +1,24 @@
/*****************************************************************************
* *
* File: *
* version.h *
* *
* Description: *
* Chelsio driver version defines. *
* *
* Copyright (c) 2003 - 2006 Chelsio Communications, Inc. *
* All rights reserved. *
* *
* Maintainers: maintainers@chelsio.com *
* *
* http://www.chelsio.com *
* *
****************************************************************************/
/* $Date: 2006/10/31 18:57:51 $ $RCSfile: version.h,v $ $Revision: 1.3 $ */
#ifndef __CHELSIO_VERSION_H
#define __CHELSIO_VERSION_H
#define DRV_DESC "Chelsio T3 Network Driver"
#define DRV_NAME "cxgb3"
/* Driver version */
#define DRV_VERSION "1.0"
#endif /* __CHELSIO_VERSION_H */

drivers/net/cxgb3/vsc8211.c Normal file

@@ -0,0 +1,208 @@
/*
* This file is part of the Chelsio T3 Ethernet driver.
*
* Copyright (C) 2005-2006 Chelsio Communications. All rights reserved.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
* release for licensing terms and conditions.
*/
#include "common.h"
/* VSC8211 PHY specific registers. */
enum {
VSC8211_INTR_ENABLE = 25,
VSC8211_INTR_STATUS = 26,
VSC8211_AUX_CTRL_STAT = 28,
};
enum {
VSC_INTR_RX_ERR = 1 << 0,
VSC_INTR_MS_ERR = 1 << 1, /* master/slave resolution error */
VSC_INTR_CABLE = 1 << 2, /* cable impairment */
VSC_INTR_FALSE_CARR = 1 << 3, /* false carrier */
VSC_INTR_MEDIA_CHG = 1 << 4, /* AMS media change */
VSC_INTR_RX_FIFO = 1 << 5, /* Rx FIFO over/underflow */
VSC_INTR_TX_FIFO = 1 << 6, /* Tx FIFO over/underflow */
VSC_INTR_DESCRAMBL = 1 << 7, /* descrambler lock-lost */
VSC_INTR_SYMBOL_ERR = 1 << 8, /* symbol error */
VSC_INTR_NEG_DONE = 1 << 10, /* autoneg done */
VSC_INTR_NEG_ERR = 1 << 11, /* autoneg error */
VSC_INTR_LINK_CHG = 1 << 13, /* link change */
VSC_INTR_ENABLE = 1 << 15, /* interrupt enable */
};
#define CFG_CHG_INTR_MASK (VSC_INTR_LINK_CHG | VSC_INTR_NEG_ERR | \
VSC_INTR_NEG_DONE)
#define INTR_MASK (CFG_CHG_INTR_MASK | VSC_INTR_TX_FIFO | VSC_INTR_RX_FIFO | \
VSC_INTR_ENABLE)
/* PHY specific auxiliary control & status register fields */
#define S_ACSR_ACTIPHY_TMR 0
#define M_ACSR_ACTIPHY_TMR 0x3
#define V_ACSR_ACTIPHY_TMR(x) ((x) << S_ACSR_ACTIPHY_TMR)
#define S_ACSR_SPEED 3
#define M_ACSR_SPEED 0x3
#define G_ACSR_SPEED(x) (((x) >> S_ACSR_SPEED) & M_ACSR_SPEED)
#define S_ACSR_DUPLEX 5
#define F_ACSR_DUPLEX (1 << S_ACSR_DUPLEX)
#define S_ACSR_ACTIPHY 6
#define F_ACSR_ACTIPHY (1 << S_ACSR_ACTIPHY)
/*
* Reset the PHY. This PHY completes reset immediately so we never wait.
*/
static int vsc8211_reset(struct cphy *cphy, int wait)
{
return t3_phy_reset(cphy, 0, 0);
}
static int vsc8211_intr_enable(struct cphy *cphy)
{
return mdio_write(cphy, 0, VSC8211_INTR_ENABLE, INTR_MASK);
}
static int vsc8211_intr_disable(struct cphy *cphy)
{
return mdio_write(cphy, 0, VSC8211_INTR_ENABLE, 0);
}
static int vsc8211_intr_clear(struct cphy *cphy)
{
u32 val;
/* Clear PHY interrupts by reading the register. */
return mdio_read(cphy, 0, VSC8211_INTR_STATUS, &val);
}
static int vsc8211_autoneg_enable(struct cphy *cphy)
{
return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE,
BMCR_ANENABLE | BMCR_ANRESTART);
}
static int vsc8211_autoneg_restart(struct cphy *cphy)
{
return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN | BMCR_ISOLATE,
BMCR_ANRESTART);
}
static int vsc8211_get_link_status(struct cphy *cphy, int *link_ok,
int *speed, int *duplex, int *fc)
{
unsigned int bmcr, status, lpa, adv;
int err, sp = -1, dplx = -1, pause = 0;
err = mdio_read(cphy, 0, MII_BMCR, &bmcr);
if (!err)
err = mdio_read(cphy, 0, MII_BMSR, &status);
if (err)
return err;
if (link_ok) {
/*
* BMSR_LSTATUS is latch-low, so if it is 0 we need to read it
* once more to get the current link state.
*/
if (!(status & BMSR_LSTATUS))
err = mdio_read(cphy, 0, MII_BMSR, &status);
if (err)
return err;
*link_ok = (status & BMSR_LSTATUS) != 0;
}
if (!(bmcr & BMCR_ANENABLE)) {
dplx = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;
if (bmcr & BMCR_SPEED1000)
sp = SPEED_1000;
else if (bmcr & BMCR_SPEED100)
sp = SPEED_100;
else
sp = SPEED_10;
} else if (status & BMSR_ANEGCOMPLETE) {
err = mdio_read(cphy, 0, VSC8211_AUX_CTRL_STAT, &status);
if (err)
return err;
dplx = (status & F_ACSR_DUPLEX) ? DUPLEX_FULL : DUPLEX_HALF;
sp = G_ACSR_SPEED(status);
if (sp == 0)
sp = SPEED_10;
else if (sp == 1)
sp = SPEED_100;
else
sp = SPEED_1000;
if (fc && dplx == DUPLEX_FULL) {
err = mdio_read(cphy, 0, MII_LPA, &lpa);
if (!err)
err = mdio_read(cphy, 0, MII_ADVERTISE, &adv);
if (err)
return err;
if (lpa & adv & ADVERTISE_PAUSE_CAP)
pause = PAUSE_RX | PAUSE_TX;
else if ((lpa & ADVERTISE_PAUSE_CAP) &&
(lpa & ADVERTISE_PAUSE_ASYM) &&
(adv & ADVERTISE_PAUSE_ASYM))
pause = PAUSE_TX;
else if ((lpa & ADVERTISE_PAUSE_ASYM) &&
(adv & ADVERTISE_PAUSE_CAP))
pause = PAUSE_RX;
}
}
if (speed)
*speed = sp;
if (duplex)
*duplex = dplx;
if (fc)
*fc = pause;
return 0;
}
static int vsc8211_power_down(struct cphy *cphy, int enable)
{
return t3_mdio_change_bits(cphy, 0, MII_BMCR, BMCR_PDOWN,
enable ? BMCR_PDOWN : 0);
}
static int vsc8211_intr_handler(struct cphy *cphy)
{
unsigned int cause;
int err, cphy_cause = 0;
err = mdio_read(cphy, 0, VSC8211_INTR_STATUS, &cause);
if (err)
return err;
cause &= INTR_MASK;
if (cause & CFG_CHG_INTR_MASK)
cphy_cause |= cphy_cause_link_change;
if (cause & (VSC_INTR_RX_FIFO | VSC_INTR_TX_FIFO))
cphy_cause |= cphy_cause_fifo_error;
return cphy_cause;
}
static struct cphy_ops vsc8211_ops = {
.reset = vsc8211_reset,
.intr_enable = vsc8211_intr_enable,
.intr_disable = vsc8211_intr_disable,
.intr_clear = vsc8211_intr_clear,
.intr_handler = vsc8211_intr_handler,
.autoneg_enable = vsc8211_autoneg_enable,
.autoneg_restart = vsc8211_autoneg_restart,
.advertise = t3_phy_advertise,
.set_speed_duplex = t3_set_phy_speed_duplex,
.get_link_status = vsc8211_get_link_status,
.power_down = vsc8211_power_down,
};
void t3_vsc8211_phy_prep(struct cphy *phy, struct adapter *adapter,
int phy_addr, const struct mdio_ops *mdio_ops)
{
cphy_init(phy, adapter, phy_addr, &vsc8211_ops, mdio_ops);
}

drivers/net/cxgb3/xgmac.c Normal file

@@ -0,0 +1,389 @@
/*
* This file is part of the Chelsio T3 Ethernet driver.
*
* Copyright (C) 2005-2006 Chelsio Communications. All rights reserved.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
* release for licensing terms and conditions.
*/
#include "common.h"
#include "regs.h"
/*
* # of exact address filters. The first one is used for the station address,
* the rest are available for multicast addresses.
*/
#define EXACT_ADDR_FILTERS 8
static inline int macidx(const struct cmac *mac)
{
return mac->offset / (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR);
}
static void xaui_serdes_reset(struct cmac *mac)
{
static const unsigned int clear[] = {
F_PWRDN0 | F_PWRDN1, F_RESETPLL01, F_RESET0 | F_RESET1,
F_PWRDN2 | F_PWRDN3, F_RESETPLL23, F_RESET2 | F_RESET3
};
int i;
struct adapter *adap = mac->adapter;
u32 ctrl = A_XGM_SERDES_CTRL0 + mac->offset;
t3_write_reg(adap, ctrl, adap->params.vpd.xauicfg[macidx(mac)] |
F_RESET3 | F_RESET2 | F_RESET1 | F_RESET0 |
F_PWRDN3 | F_PWRDN2 | F_PWRDN1 | F_PWRDN0 |
F_RESETPLL23 | F_RESETPLL01);
t3_read_reg(adap, ctrl);
udelay(15);
for (i = 0; i < ARRAY_SIZE(clear); i++) {
t3_set_reg_field(adap, ctrl, clear[i], 0);
udelay(15);
}
}
void t3b_pcs_reset(struct cmac *mac)
{
t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset,
F_PCS_RESET_, 0);
udelay(20);
t3_set_reg_field(mac->adapter, A_XGM_RESET_CTRL + mac->offset, 0,
F_PCS_RESET_);
}
int t3_mac_reset(struct cmac *mac)
{
static const struct addr_val_pair mac_reset_avp[] = {
{A_XGM_TX_CTRL, 0},
{A_XGM_RX_CTRL, 0},
{A_XGM_RX_CFG, F_DISPAUSEFRAMES | F_EN1536BFRAMES |
F_RMFCS | F_ENJUMBO | F_ENHASHMCAST},
{A_XGM_RX_HASH_LOW, 0},
{A_XGM_RX_HASH_HIGH, 0},
{A_XGM_RX_EXACT_MATCH_LOW_1, 0},
{A_XGM_RX_EXACT_MATCH_LOW_2, 0},
{A_XGM_RX_EXACT_MATCH_LOW_3, 0},
{A_XGM_RX_EXACT_MATCH_LOW_4, 0},
{A_XGM_RX_EXACT_MATCH_LOW_5, 0},
{A_XGM_RX_EXACT_MATCH_LOW_6, 0},
{A_XGM_RX_EXACT_MATCH_LOW_7, 0},
{A_XGM_RX_EXACT_MATCH_LOW_8, 0},
{A_XGM_STAT_CTRL, F_CLRSTATS}
};
u32 val;
struct adapter *adap = mac->adapter;
unsigned int oft = mac->offset;
t3_write_reg(adap, A_XGM_RESET_CTRL + oft, F_MAC_RESET_);
t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
t3_write_regs(adap, mac_reset_avp, ARRAY_SIZE(mac_reset_avp), oft);
t3_set_reg_field(adap, A_XGM_RXFIFO_CFG + oft,
F_RXSTRFRWRD | F_DISERRFRAMES,
uses_xaui(adap) ? 0 : F_RXSTRFRWRD);
if (uses_xaui(adap)) {
if (adap->params.rev == 0) {
t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0,
F_RXENABLE | F_TXENABLE);
if (t3_wait_op_done(adap, A_XGM_SERDES_STATUS1 + oft,
F_CMULOCK, 1, 5, 2)) {
CH_ERR(adap,
"MAC %d XAUI SERDES CMU lock failed\n",
macidx(mac));
return -1;
}
t3_set_reg_field(adap, A_XGM_SERDES_CTRL + oft, 0,
F_SERDESRESET_);
} else
xaui_serdes_reset(mac);
}
if (adap->params.rev > 0)
t3_write_reg(adap, A_XGM_PAUSE_TIMER + oft, 0xf000);
val = F_MAC_RESET_;
if (is_10G(adap))
val |= F_PCS_RESET_;
else if (uses_xaui(adap))
val |= F_PCS_RESET_ | F_XG2G_RESET_;
else
val |= F_RGMII_RESET_ | F_XG2G_RESET_;
t3_write_reg(adap, A_XGM_RESET_CTRL + oft, val);
t3_read_reg(adap, A_XGM_RESET_CTRL + oft); /* flush */
if ((val & F_PCS_RESET_) && adap->params.rev) {
msleep(1);
t3b_pcs_reset(mac);
}
memset(&mac->stats, 0, sizeof(mac->stats));
return 0;
}
/*
* Set the exact match register 'idx' to recognize the given Ethernet address.
*/
static void set_addr_filter(struct cmac *mac, int idx, const u8 * addr)
{
u32 addr_lo, addr_hi;
unsigned int oft = mac->offset + idx * 8;
addr_lo = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
addr_hi = (addr[5] << 8) | addr[4];
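	/* e.g. 00:11:22:33:44:55 gives addr_lo 0x33221100, addr_hi 0x5544 */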
t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_LOW_1 + oft, addr_lo);
t3_write_reg(mac->adapter, A_XGM_RX_EXACT_MATCH_HIGH_1 + oft, addr_hi);
}
/* Set one of the station's unicast MAC addresses. */
int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6])
{
if (idx >= mac->nucast)
return -EINVAL;
set_addr_filter(mac, idx, addr);
return 0;
}
/*
* Specify the number of exact address filters that should be reserved for
* unicast addresses. Caller should reload the unicast and multicast addresses
* after calling this.
*/
int t3_mac_set_num_ucast(struct cmac *mac, int n)
{
if (n > EXACT_ADDR_FILTERS)
return -EINVAL;
mac->nucast = n;
return 0;
}
/* Calculate the RX hash filter index of an Ethernet address */
static int hash_hw_addr(const u8 * addr)
{
int hash = 0, octet, bit, i = 0, c;
for (octet = 0; octet < 6; ++octet)
for (c = addr[octet], bit = 0; bit < 8; c >>= 1, ++bit) {
hash ^= (c & 1) << i;
if (++i == 6)
i = 0;
}
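	/*
	 * The 48 address bits XOR-fold onto a 6-bit index; e.g. the
	 * broadcast address ff:ff:ff:ff:ff:ff hashes to 0 because each
	 * output bit accumulates eight 1s, which cancel.
	 */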
return hash;
}
int t3_mac_set_rx_mode(struct cmac *mac, struct t3_rx_mode *rm)
{
u32 val, hash_lo, hash_hi;
struct adapter *adap = mac->adapter;
unsigned int oft = mac->offset;
val = t3_read_reg(adap, A_XGM_RX_CFG + oft) & ~F_COPYALLFRAMES;
if (rm->dev->flags & IFF_PROMISC)
val |= F_COPYALLFRAMES;
t3_write_reg(adap, A_XGM_RX_CFG + oft, val);
if (rm->dev->flags & IFF_ALLMULTI)
hash_lo = hash_hi = 0xffffffff;
else {
u8 *addr;
int exact_addr_idx = mac->nucast;
hash_lo = hash_hi = 0;
while ((addr = t3_get_next_mcaddr(rm)))
if (exact_addr_idx < EXACT_ADDR_FILTERS)
set_addr_filter(mac, exact_addr_idx++, addr);
else {
int hash = hash_hw_addr(addr);
if (hash < 32)
hash_lo |= (1 << hash);
else
hash_hi |= (1 << (hash - 32));
}
}
t3_write_reg(adap, A_XGM_RX_HASH_LOW + oft, hash_lo);
t3_write_reg(adap, A_XGM_RX_HASH_HIGH + oft, hash_hi);
return 0;
}
int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu)
{
int hwm, lwm;
unsigned int thres, v;
struct adapter *adap = mac->adapter;
/*
 * MAX_FRAME_SIZE includes header + FCS, mtu doesn't. The HW max
* packet size register includes header, but not FCS.
*/
mtu += 14;
if (mtu > MAX_FRAME_SIZE - 4)
return -EINVAL;
t3_write_reg(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, mtu);
/*
* Adjust the PAUSE frame watermarks. We always set the LWM, and the
* HWM only if flow-control is enabled.
*/
hwm = max(MAC_RXFIFO_SIZE - 3 * mtu, MAC_RXFIFO_SIZE / 2U);
hwm = min(hwm, 3 * MAC_RXFIFO_SIZE / 4 + 1024);
lwm = hwm - 1024;
v = t3_read_reg(adap, A_XGM_RXFIFO_CFG + mac->offset);
v &= ~V_RXFIFOPAUSELWM(M_RXFIFOPAUSELWM);
v |= V_RXFIFOPAUSELWM(lwm / 8);
if (G_RXFIFOPAUSEHWM(v))
v = (v & ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM)) |
V_RXFIFOPAUSEHWM(hwm / 8);
t3_write_reg(adap, A_XGM_RXFIFO_CFG + mac->offset, v);
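	/*
	 * Worked example, assuming a 32KB MAC Rx FIFO (MAC_RXFIFO_SIZE ==
	 * 32768) and mtu 1500, i.e. 1514 with the header: hwm =
	 * max(32768 - 3*1514, 16384) = 28226, capped to 3*32768/4 + 1024 =
	 * 25600, and lwm = 25600 - 1024 = 24576.
	 */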
/* Adjust the TX FIFO threshold based on the MTU */
thres = (adap->params.vpd.cclk * 1000) / 15625;
thres = (thres * mtu) / 1000;
if (is_10G(adap))
thres /= 10;
thres = mtu > thres ? (mtu - thres + 7) / 8 : 0;
thres = max(thres, 8U); /* need at least 8 */
t3_set_reg_field(adap, A_XGM_TXFIFO_CFG + mac->offset,
V_TXFIFOTHRESH(M_TXFIFOTHRESH), V_TXFIFOTHRESH(thres));
return 0;
}
int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc)
{
u32 val;
struct adapter *adap = mac->adapter;
unsigned int oft = mac->offset;
if (duplex >= 0 && duplex != DUPLEX_FULL)
return -EINVAL;
if (speed >= 0) {
if (speed == SPEED_10)
val = V_PORTSPEED(0);
else if (speed == SPEED_100)
val = V_PORTSPEED(1);
else if (speed == SPEED_1000)
val = V_PORTSPEED(2);
else if (speed == SPEED_10000)
val = V_PORTSPEED(3);
else
return -EINVAL;
t3_set_reg_field(adap, A_XGM_PORT_CFG + oft,
V_PORTSPEED(M_PORTSPEED), val);
}
val = t3_read_reg(adap, A_XGM_RXFIFO_CFG + oft);
val &= ~V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM);
if (fc & PAUSE_TX)
val |= V_RXFIFOPAUSEHWM(G_RXFIFOPAUSELWM(val) + 128); /* +1KB */
t3_write_reg(adap, A_XGM_RXFIFO_CFG + oft, val);
t3_set_reg_field(adap, A_XGM_TX_CFG + oft, F_TXPAUSEEN,
(fc & PAUSE_RX) ? F_TXPAUSEEN : 0);
return 0;
}
int t3_mac_enable(struct cmac *mac, int which)
{
int idx = macidx(mac);
struct adapter *adap = mac->adapter;
unsigned int oft = mac->offset;
if (which & MAC_DIRECTION_TX) {
t3_write_reg(adap, A_XGM_TX_CTRL + oft, F_TXEN);
t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
t3_write_reg(adap, A_TP_PIO_DATA, 0xbf000001);
t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE);
t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx, 1 << idx);
}
if (which & MAC_DIRECTION_RX)
t3_write_reg(adap, A_XGM_RX_CTRL + oft, F_RXEN);
return 0;
}
int t3_mac_disable(struct cmac *mac, int which)
{
int idx = macidx(mac);
struct adapter *adap = mac->adapter;
if (which & MAC_DIRECTION_TX) {
t3_write_reg(adap, A_XGM_TX_CTRL + mac->offset, 0);
t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_CFG_CH0 + idx);
t3_write_reg(adap, A_TP_PIO_DATA, 0xc000001f);
t3_write_reg(adap, A_TP_PIO_ADDR, A_TP_TX_DROP_MODE);
t3_set_reg_field(adap, A_TP_PIO_DATA, 1 << idx, 0);
}
if (which & MAC_DIRECTION_RX)
t3_write_reg(adap, A_XGM_RX_CTRL + mac->offset, 0);
return 0;
}
/*
* This function is called periodically to accumulate the current values of the
* RMON counters into the port statistics. Since the packet counters are only
* 32 bits they can overflow in ~286 secs at 10G, so the function should be
* called more frequently than that. The byte counters are 45-bit wide, they
* would overflow in ~7.8 hours.
*/
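/*
 * Sanity check on those figures: 2^32 frames at the 10G minimum-size rate
 * of ~14.88 Mpps last ~289 s, and 2^45 bytes at 1.25 GB/s last ~28100 s,
 * about 7.8 hours.
 */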
const struct mac_stats *t3_mac_update_stats(struct cmac *mac)
{
#define RMON_READ(mac, addr) t3_read_reg(mac->adapter, addr + mac->offset)
#define RMON_UPDATE(mac, name, reg) \
(mac)->stats.name += (u64)RMON_READ(mac, A_XGM_STAT_##reg)
#define RMON_UPDATE64(mac, name, reg_lo, reg_hi) \
(mac)->stats.name += RMON_READ(mac, A_XGM_STAT_##reg_lo) + \
((u64)RMON_READ(mac, A_XGM_STAT_##reg_hi) << 32)
u32 v, lo;
RMON_UPDATE64(mac, rx_octets, RX_BYTES_LOW, RX_BYTES_HIGH);
RMON_UPDATE64(mac, rx_frames, RX_FRAMES_LOW, RX_FRAMES_HIGH);
RMON_UPDATE(mac, rx_mcast_frames, RX_MCAST_FRAMES);
RMON_UPDATE(mac, rx_bcast_frames, RX_BCAST_FRAMES);
RMON_UPDATE(mac, rx_fcs_errs, RX_CRC_ERR_FRAMES);
RMON_UPDATE(mac, rx_pause, RX_PAUSE_FRAMES);
RMON_UPDATE(mac, rx_jabber, RX_JABBER_FRAMES);
RMON_UPDATE(mac, rx_short, RX_SHORT_FRAMES);
RMON_UPDATE(mac, rx_symbol_errs, RX_SYM_CODE_ERR_FRAMES);
RMON_UPDATE(mac, rx_too_long, RX_OVERSIZE_FRAMES);
mac->stats.rx_too_long += RMON_READ(mac, A_XGM_RX_MAX_PKT_SIZE_ERR_CNT);
RMON_UPDATE(mac, rx_frames_64, RX_64B_FRAMES);
RMON_UPDATE(mac, rx_frames_65_127, RX_65_127B_FRAMES);
RMON_UPDATE(mac, rx_frames_128_255, RX_128_255B_FRAMES);
RMON_UPDATE(mac, rx_frames_256_511, RX_256_511B_FRAMES);
RMON_UPDATE(mac, rx_frames_512_1023, RX_512_1023B_FRAMES);
RMON_UPDATE(mac, rx_frames_1024_1518, RX_1024_1518B_FRAMES);
RMON_UPDATE(mac, rx_frames_1519_max, RX_1519_MAXB_FRAMES);
RMON_UPDATE64(mac, tx_octets, TX_BYTE_LOW, TX_BYTE_HIGH);
RMON_UPDATE64(mac, tx_frames, TX_FRAME_LOW, TX_FRAME_HIGH);
RMON_UPDATE(mac, tx_mcast_frames, TX_MCAST);
RMON_UPDATE(mac, tx_bcast_frames, TX_BCAST);
RMON_UPDATE(mac, tx_pause, TX_PAUSE);
	/* This counts error frames in general (bad FCS, underrun, etc.). */
RMON_UPDATE(mac, tx_underrun, TX_ERR_FRAMES);
RMON_UPDATE(mac, tx_frames_64, TX_64B_FRAMES);
RMON_UPDATE(mac, tx_frames_65_127, TX_65_127B_FRAMES);
RMON_UPDATE(mac, tx_frames_128_255, TX_128_255B_FRAMES);
RMON_UPDATE(mac, tx_frames_256_511, TX_256_511B_FRAMES);
RMON_UPDATE(mac, tx_frames_512_1023, TX_512_1023B_FRAMES);
RMON_UPDATE(mac, tx_frames_1024_1518, TX_1024_1518B_FRAMES);
RMON_UPDATE(mac, tx_frames_1519_max, TX_1519_MAXB_FRAMES);
/* The next stat isn't clear-on-read. */
t3_write_reg(mac->adapter, A_TP_MIB_INDEX, mac->offset ? 51 : 50);
v = t3_read_reg(mac->adapter, A_TP_MIB_RDATA);
lo = (u32) mac->stats.rx_cong_drops;
mac->stats.rx_cong_drops += (u64) (v - lo);
return &mac->stats;
}