cxgb4: introduce SMT ops to prepare for SMAC rewrite support

Introduce SMT operations for allocating and removing entries from the
SMAC table. Make TCAM filters use these SMT ops whenever SMAC rewrite
is required.
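
A condensed sketch of the intended flow, pared down from the cxgb4_filter.c
changes below (illustrative only; error handling trimmed):

	/* Filter creation: reserve (or reuse) a switching SMT entry
	 * for the new source MAC.
	 */
	if (f->fs.newsmac) {
		f->smt = cxgb4_smt_alloc_switching(f->dev, f->fs.smac);
		if (!f->smt)
			return -ENOMEM;		/* SMT exhausted */
	}

	/* Once firmware reports FW_FILTER_WR_FLT_ADDED, point the filter's
	 * TCB at the allocated SMT index via configure_filter_smac().
	 */
	if (f->fs.newsmac)
		err = configure_filter_smac(adap, f);

	/* Filter teardown: drop the reference taken above. */
	if (f->smt)
		cxgb4_smt_release(f->smt);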

Signed-off-by: Kumar Sanghvi <kumaras@chelsio.com>
Signed-off-by: Rahul Lakkireddy <rahul.lakkireddy@chelsio.com>
Signed-off-by: Ganesh Goudar <ganeshgr@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Authored by Kumar Sanghvi on 2017-10-18 20:49:11 +05:30; committed by David S. Miller
parent 27ece1f357
commit 3bdb376e69
8 changed files with 519 additions and 19 deletions

drivers/net/ethernet/chelsio/cxgb4/Makefile

@@ -4,7 +4,7 @@
obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o \
cxgb4-objs := cxgb4_main.o l2t.o smt.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o \
cxgb4_uld.o sched.o cxgb4_filter.o cxgb4_tc_u32.o \
cxgb4_ptp.o cxgb4_tc_flower.o cxgb4_cudbg.o \
cudbg_common.o cudbg_lib.o

drivers/net/ethernet/chelsio/cxgb4/cxgb4.h

@@ -858,6 +858,7 @@ struct adapter {
unsigned int clipt_start;
unsigned int clipt_end;
struct clip_tbl *clipt;
struct smt_data *smt;
struct cxgb4_uld_info *uld;
void *uld_handle[CXGB4_ULD_MAX];
unsigned int num_uld;
@@ -1098,9 +1099,9 @@ struct filter_entry {
u32 locked:1; /* filter is administratively locked */
u32 pending:1; /* filter action is pending firmware reply */
u32 smtidx:8; /* Source MAC Table index for smac */
struct filter_ctx *ctx; /* Caller's completion hook */
struct l2t_entry *l2t; /* Layer Two Table entry for dmac */
struct smt_entry *smt; /* Source Mac Table entry for smac */
struct net_device *dev; /* Associated net device */
u32 tid; /* This will store the actual tid */

drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c

@@ -34,7 +34,9 @@
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_tcb.h"
#include "l2t.h"
#include "smt.h"
#include "t4fw_api.h"
#include "cxgb4_filter.h"
@@ -332,6 +334,21 @@ int set_filter_wr(struct adapter *adapter, int fidx)
}
}
/* If the new filter requires loopback Source MAC rewriting then
* we need to allocate a SMT entry for the filter.
*/
if (f->fs.newsmac) {
f->smt = cxgb4_smt_alloc_switching(f->dev, f->fs.smac);
if (!f->smt) {
if (f->l2t) {
cxgb4_l2t_release(f->l2t);
f->l2t = NULL;
}
kfree_skb(skb);
return -ENOMEM;
}
}
fwr = __skb_put_zero(skb, sizeof(*fwr));
/* It would be nice to put most of the following in t4_hw.c but most
@@ -357,7 +374,6 @@ int set_filter_wr(struct adapter *adapter, int fidx)
FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
FW_FILTER_WR_SMAC_V(f->fs.newsmac) |
FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
f->fs.newvlan == VLAN_REWRITE) |
FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
@@ -404,8 +420,6 @@ int set_filter_wr(struct adapter *adapter, int fidx)
fwr->lpm = htons(f->fs.mask.lport);
fwr->fp = htons(f->fs.val.fport);
fwr->fpm = htons(f->fs.mask.fport);
if (f->fs.newsmac)
memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));
/* Mark the filter as "pending" and ship off the Filter Work Request.
* When we get the Work Request Reply we'll clear the pending status.
@@ -463,6 +477,9 @@ void clear_filter(struct adapter *adap, struct filter_entry *f)
if (f->l2t)
cxgb4_l2t_release(f->l2t);
if (f->smt)
cxgb4_smt_release(f->smt);
/* The zeroing of the filter rule below clears the filter valid,
* pending, locked flags, l2t pointer, etc. so it's all we need for
* this operation.
@@ -757,6 +774,62 @@ int cxgb4_del_filter(struct net_device *dev, int filter_id)
return ret;
}
static int set_tcb_field(struct adapter *adap, struct filter_entry *f,
unsigned int ftid, u16 word, u64 mask, u64 val,
int no_reply)
{
struct cpl_set_tcb_field *req;
struct sk_buff *skb;
skb = alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_ATOMIC);
if (!skb)
return -ENOMEM;
req = (struct cpl_set_tcb_field *)__skb_put(skb, sizeof(*req));
memset(req, 0, sizeof(*req));
INIT_TP_WR_CPL(req, CPL_SET_TCB_FIELD, ftid);
req->reply_ctrl = htons(REPLY_CHAN_V(0) |
QUEUENO_V(adap->sge.fw_evtq.abs_id) |
NO_REPLY_V(no_reply));
req->word_cookie = htons(TCB_WORD_V(word) | TCB_COOKIE_V(ftid));
req->mask = cpu_to_be64(mask);
req->val = cpu_to_be64(val);
set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
t4_ofld_send(adap, skb);
return 0;
}
/* Set one of the t_flags bits in the TCB.
*/
static int set_tcb_tflag(struct adapter *adap, struct filter_entry *f,
unsigned int ftid, unsigned int bit_pos,
unsigned int val, int no_reply)
{
return set_tcb_field(adap, f, ftid, TCB_T_FLAGS_W, 1ULL << bit_pos,
(unsigned long long)val << bit_pos, no_reply);
}
static int configure_filter_smac(struct adapter *adap, struct filter_entry *f)
{
int err;
/* do a set-tcb for smac-sel and CWR bit.. */
err = set_tcb_tflag(adap, f, f->tid, TF_CCTRL_CWR_S, 1, 1);
if (err)
goto smac_err;
err = set_tcb_field(adap, f, f->tid, TCB_SMAC_SEL_W,
TCB_SMAC_SEL_V(TCB_SMAC_SEL_M),
TCB_SMAC_SEL_V(f->smt->idx), 1);
if (!err)
return 0;
smac_err:
dev_err(adap->pdev_dev, "filter %u smac config failed with error %u\n",
f->tid, err);
return err;
}
/* Handle a filter write/deletion reply. */
void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
@@ -795,19 +868,23 @@ void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
clear_filter(adap, f);
if (ctx)
ctx->result = 0;
} else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
idx);
clear_filter(adap, f);
if (ctx)
ctx->result = -ENOMEM;
} else if (ret == FW_FILTER_WR_FLT_ADDED) {
f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
f->pending = 0; /* asynchronous setup completed */
f->valid = 1;
if (ctx) {
ctx->result = 0;
ctx->tid = idx;
int err = 0;
if (f->fs.newsmac)
err = configure_filter_smac(adap, f);
if (!err) {
f->pending = 0; /* async setup completed */
f->valid = 1;
if (ctx) {
ctx->result = 0;
ctx->tid = idx;
}
} else {
clear_filter(adap, f);
if (ctx)
ctx->result = err;
}
} else {
/* Something went wrong. Issue a warning about the

drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c

@@ -77,6 +77,7 @@
#include "cxgb4_debugfs.h"
#include "clip_tbl.h"
#include "l2t.h"
#include "smt.h"
#include "sched.h"
#include "cxgb4_tc_u32.h"
#include "cxgb4_tc_flower.h"
@@ -563,6 +564,10 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
const struct cpl_l2t_write_rpl *p = (void *)rsp;
do_l2t_write_rpl(q->adap, p);
} else if (opcode == CPL_SMT_WRITE_RPL) {
const struct cpl_smt_write_rpl *p = (void *)rsp;
do_smt_write_rpl(q->adap, p);
} else if (opcode == CPL_SET_TCB_RPL) {
const struct cpl_set_tcb_rpl *p = (void *)rsp;
@@ -4641,6 +4646,7 @@ static void free_some_resources(struct adapter *adapter)
{
unsigned int i;
kvfree(adapter->smt);
kvfree(adapter->l2t);
t4_cleanup_sched(adapter);
kvfree(adapter->tids.tid_tab);
@@ -5067,6 +5073,12 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
cfg_queues(adapter);
adapter->smt = t4_init_smt();
if (!adapter->smt) {
/* We tolerate a lack of SMT, giving up some functionality */
dev_warn(&pdev->dev, "could not allocate SMT, continuing\n");
}
adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end);
if (!adapter->l2t) {
/* We tolerate a lack of L2T, giving up some functionality */

drivers/net/ethernet/chelsio/cxgb4/smt.c (new file)

@@ -0,0 +1,247 @@
/*
* This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
*
* Copyright (c) 2017 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "cxgb4.h"
#include "smt.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "t4_regs.h"
#include "t4_values.h"
struct smt_data *t4_init_smt(void)
{
unsigned int smt_size;
struct smt_data *s;
int i;
smt_size = SMT_SIZE;
s = kvzalloc(sizeof(*s) + smt_size * sizeof(struct smt_entry),
GFP_KERNEL);
if (!s)
return NULL;
s->smt_size = smt_size;
rwlock_init(&s->lock);
for (i = 0; i < s->smt_size; ++i) {
s->smtab[i].idx = i;
s->smtab[i].state = SMT_STATE_UNUSED;
memset(&s->smtab[i].src_mac, 0, ETH_ALEN);
spin_lock_init(&s->smtab[i].lock);
atomic_set(&s->smtab[i].refcnt, 0);
}
return s;
}
static struct smt_entry *find_or_alloc_smte(struct smt_data *s, u8 *smac)
{
struct smt_entry *first_free = NULL;
struct smt_entry *e, *end;
for (e = &s->smtab[0], end = &s->smtab[s->smt_size]; e != end; ++e) {
if (atomic_read(&e->refcnt) == 0) {
if (!first_free)
first_free = e;
} else {
if (e->state == SMT_STATE_SWITCHING) {
/* This entry is actually in use. See if we can
* re-use it ?
*/
if (memcmp(e->src_mac, smac, ETH_ALEN) == 0)
goto found_reuse;
}
}
}
if (first_free) {
e = first_free;
goto found;
}
return NULL;
found:
e->state = SMT_STATE_UNUSED;
found_reuse:
return e;
}
static void t4_smte_free(struct smt_entry *e)
{
spin_lock_bh(&e->lock);
if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */
e->state = SMT_STATE_UNUSED;
}
spin_unlock_bh(&e->lock);
}
/**
* @e: smt entry to release
*
* Releases ref count and frees up an smt entry from SMT table
*/
void cxgb4_smt_release(struct smt_entry *e)
{
if (atomic_dec_and_test(&e->refcnt))
t4_smte_free(e);
}
EXPORT_SYMBOL(cxgb4_smt_release);
void do_smt_write_rpl(struct adapter *adap, const struct cpl_smt_write_rpl *rpl)
{
unsigned int smtidx = TID_TID_G(GET_TID(rpl));
struct smt_data *s = adap->smt;
if (unlikely(rpl->status != CPL_ERR_NONE)) {
struct smt_entry *e = &s->smtab[smtidx];
dev_err(adap->pdev_dev,
"Unexpected SMT_WRITE_RPL status %u for entry %u\n",
rpl->status, smtidx);
spin_lock(&e->lock);
e->state = SMT_STATE_ERROR;
spin_unlock(&e->lock);
return;
}
}
static int write_smt_entry(struct adapter *adapter, struct smt_entry *e)
{
struct cpl_t6_smt_write_req *t6req;
struct smt_data *s = adapter->smt;
struct cpl_smt_write_req *req;
struct sk_buff *skb;
int size;
u8 row;
if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) {
size = sizeof(*req);
skb = alloc_skb(size, GFP_ATOMIC);
if (!skb)
return -ENOMEM;
/* Source MAC Table (SMT) contains 256 SMAC entries
* organized in 128 rows of 2 entries each.
*/
req = (struct cpl_smt_write_req *)__skb_put(skb, size);
INIT_TP_WR(req, 0);
/* Each row contains an SMAC pair.
* LSB selects the SMAC entry within a row
*/
row = (e->idx >> 1);
if (e->idx & 1) {
req->pfvf1 = 0x0;
memcpy(req->src_mac1, e->src_mac, ETH_ALEN);
/* fill pfvf0/src_mac0 with entry
* at prev index from smt-tab.
*/
req->pfvf0 = 0x0;
memcpy(req->src_mac0, s->smtab[e->idx - 1].src_mac,
ETH_ALEN);
} else {
req->pfvf0 = 0x0;
memcpy(req->src_mac0, e->src_mac, ETH_ALEN);
/* fill pfvf1/src_mac1 with entry
* at next index from smt-tab
*/
req->pfvf1 = 0x0;
memcpy(req->src_mac1, s->smtab[e->idx + 1].src_mac,
ETH_ALEN);
}
} else {
size = sizeof(*t6req);
skb = alloc_skb(size, GFP_ATOMIC);
if (!skb)
return -ENOMEM;
/* Source MAC Table (SMT) contains 256 SMAC entries */
t6req = (struct cpl_t6_smt_write_req *)__skb_put(skb, size);
INIT_TP_WR(t6req, 0);
req = (struct cpl_smt_write_req *)t6req;
/* fill pfvf0/src_mac0 from smt-tab */
req->pfvf0 = 0x0;
memcpy(req->src_mac0, s->smtab[e->idx].src_mac, ETH_ALEN);
row = e->idx;
}
OPCODE_TID(req) =
htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, e->idx |
TID_QID_V(adapter->sge.fw_evtq.abs_id)));
req->params = htonl(SMTW_NORPL_V(0) |
SMTW_IDX_V(row) |
SMTW_OVLAN_IDX_V(0));
t4_mgmt_tx(adapter, skb);
return 0;
}
static struct smt_entry *t4_smt_alloc_switching(struct adapter *adap, u16 pfvf,
u8 *smac)
{
struct smt_data *s = adap->smt;
struct smt_entry *e;
write_lock_bh(&s->lock);
e = find_or_alloc_smte(s, smac);
if (e) {
spin_lock(&e->lock);
if (!atomic_read(&e->refcnt)) {
atomic_set(&e->refcnt, 1);
e->state = SMT_STATE_SWITCHING;
e->pfvf = pfvf;
memcpy(e->src_mac, smac, ETH_ALEN);
write_smt_entry(adap, e);
} else {
atomic_inc(&e->refcnt);
}
spin_unlock(&e->lock);
}
write_unlock_bh(&s->lock);
return e;
}
/**
* @dev: net_device pointer
* @smac: MAC address to add to SMT
* Returns pointer to the SMT entry created
*
* Allocates an SMT entry to be used by switching rule of a filter.
*/
struct smt_entry *cxgb4_smt_alloc_switching(struct net_device *dev, u8 *smac)
{
struct adapter *adap = netdev2adap(dev);
return t4_smt_alloc_switching(adap, 0x0, smac);
}
EXPORT_SYMBOL(cxgb4_smt_alloc_switching);

drivers/net/ethernet/chelsio/cxgb4/smt.h (new file)

@@ -0,0 +1,76 @@
/*
* This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
*
* Copyright (c) 2017 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __CXGB4_SMT_H
#define __CXGB4_SMT_H
#include <linux/spinlock.h>
#include <linux/if_ether.h>
#include <linux/atomic.h>
struct adapter;
struct cpl_smt_write_rpl;
/* SMT related handling. Heavily adapted based on l2t ops in l2t.h/l2t.c
*/
enum {
SMT_STATE_SWITCHING,
SMT_STATE_UNUSED,
SMT_STATE_ERROR
};
enum {
SMT_SIZE = 256
};
struct smt_entry {
u16 state;
u16 idx;
u16 pfvf;
u8 src_mac[ETH_ALEN];
atomic_t refcnt;
spinlock_t lock; /* protect smt entry add,removal */
};
struct smt_data {
unsigned int smt_size;
rwlock_t lock;
struct smt_entry smtab[0];
};
struct smt_data *t4_init_smt(void);
struct smt_entry *cxgb4_smt_alloc_switching(struct net_device *dev, u8 *smac);
void cxgb4_smt_release(struct smt_entry *e);
void do_smt_write_rpl(struct adapter *p, const struct cpl_smt_write_rpl *rpl);
#endif /* __CXGB4_SMT_H */

drivers/net/ethernet/chelsio/cxgb4/t4_msg.h

@@ -50,6 +50,7 @@ enum {
CPL_RX_DATA_ACK = 0xD,
CPL_TX_PKT = 0xE,
CPL_L2T_WRITE_REQ = 0x12,
CPL_SMT_WRITE_REQ = 0x14,
CPL_TID_RELEASE = 0x1A,
CPL_TX_DATA_ISO = 0x1F,
@@ -60,6 +61,7 @@ enum {
CPL_PEER_CLOSE = 0x26,
CPL_ABORT_REQ_RSS = 0x2B,
CPL_ABORT_RPL_RSS = 0x2D,
CPL_SMT_WRITE_RPL = 0x2E,
CPL_RX_PHYS_ADDR = 0x30,
CPL_CLOSE_CON_RPL = 0x32,
@@ -681,8 +683,8 @@ struct cpl_set_tcb_field {
};
/* cpl_set_tcb_field.word_cookie fields */
#define TCB_WORD_S 0
#define TCB_WORD(x) ((x) << TCB_WORD_S)
#define TCB_WORD_S 0
#define TCB_WORD_V(x) ((x) << TCB_WORD_S)
#define TCB_COOKIE_S 5
#define TCB_COOKIE_M 0x7
@@ -1266,6 +1268,44 @@ struct cpl_l2t_write_rpl {
u8 rsvd[3];
};
struct cpl_smt_write_req {
WR_HDR;
union opcode_tid ot;
__be32 params;
__be16 pfvf1;
u8 src_mac1[6];
__be16 pfvf0;
u8 src_mac0[6];
};
struct cpl_t6_smt_write_req {
WR_HDR;
union opcode_tid ot;
__be32 params;
__be64 tag;
__be16 pfvf0;
u8 src_mac0[6];
__be32 local_ip;
__be32 rsvd;
};
struct cpl_smt_write_rpl {
union opcode_tid ot;
u8 status;
u8 rsvd[3];
};
/* cpl_smt_{read,write}_req.params fields */
#define SMTW_OVLAN_IDX_S 16
#define SMTW_OVLAN_IDX_V(x) ((x) << SMTW_OVLAN_IDX_S)
#define SMTW_IDX_S 20
#define SMTW_IDX_V(x) ((x) << SMTW_IDX_S)
#define SMTW_NORPL_S 31
#define SMTW_NORPL_V(x) ((x) << SMTW_NORPL_S)
#define SMTW_NORPL_F SMTW_NORPL_V(1U)
struct cpl_rdma_terminate {
union opcode_tid ot;
__be16 rsvd;

drivers/net/ethernet/chelsio/cxgb4/t4_tcb.h (new file)

@@ -0,0 +1,47 @@
/*
* This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
*
* Copyright (c) 2017 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __T4_TCB_H
#define __T4_TCB_H
#define TCB_SMAC_SEL_W 0
#define TCB_SMAC_SEL_S 24
#define TCB_SMAC_SEL_M 0xffULL
#define TCB_SMAC_SEL_V(x) ((x) << TCB_SMAC_SEL_S)
#define TCB_T_FLAGS_W 1
#define TF_CCTRL_CWR_S 61
#endif /* __T4_TCB_H */