RDMA/siw: Fix DEFINE_PER_CPU compilation when ARCH_NEEDS_WEAK_PER_CPU
The initializer for the variable cannot be inside the macro (and zero initialization isn't needed anyhow). include/linux/percpu-defs.h:92:33: warning: '__pcpu_unique_use_cnt' initialized and declared 'extern' extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \ ^~~~~~~~~~~~~~ include/linux/percpu-defs.h:115:2: note: in expansion of macro 'DEFINE_PER_CPU_SECTION' DEFINE_PER_CPU_SECTION(type, name, "") ^~~~~~~~~~~~~~~~~~~~~~ drivers/infiniband/sw/siw/siw_main.c:129:8: note: in expansion of macro 'DEFINE_PER_CPU' static DEFINE_PER_CPU(atomic_t, use_cnt = ATOMIC_INIT(0)); ^~~~~~~~~~~~~~ Also, the rules for per-CPU variables require their names to be globally unique, so prefix them with siw_. Fixes: b9be6f18cf
("rdma/siw: transmit path") Fixes: bdcf26bf9b
("rdma/siw: network and RDMA core interface") Reported-by: Stephen Rothwell <sfr@canb.auug.org.au> Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
This commit is contained in:
parent
d3e5397169
commit
4c7d6dcd36
|
@ -126,7 +126,7 @@ static int siw_dev_qualified(struct net_device *netdev)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static DEFINE_PER_CPU(atomic_t, use_cnt = ATOMIC_INIT(0));
|
||||
static DEFINE_PER_CPU(atomic_t, siw_use_cnt);
|
||||
|
||||
static struct {
|
||||
struct cpumask **tx_valid_cpus;
|
||||
|
@ -215,7 +215,7 @@ int siw_get_tx_cpu(struct siw_device *sdev)
|
|||
if (!siw_tx_thread[cpu])
|
||||
continue;
|
||||
|
||||
usage = atomic_read(&per_cpu(use_cnt, cpu));
|
||||
usage = atomic_read(&per_cpu(siw_use_cnt, cpu));
|
||||
if (usage <= min_use) {
|
||||
tx_cpu = cpu;
|
||||
min_use = usage;
|
||||
|
@ -226,7 +226,7 @@ int siw_get_tx_cpu(struct siw_device *sdev)
|
|||
|
||||
out:
|
||||
if (tx_cpu >= 0)
|
||||
atomic_inc(&per_cpu(use_cnt, tx_cpu));
|
||||
atomic_inc(&per_cpu(siw_use_cnt, tx_cpu));
|
||||
else
|
||||
pr_warn("siw: no tx cpu found\n");
|
||||
|
||||
|
@ -235,7 +235,7 @@ int siw_get_tx_cpu(struct siw_device *sdev)
|
|||
|
||||
void siw_put_tx_cpu(int cpu)
|
||||
{
|
||||
atomic_dec(&per_cpu(use_cnt, cpu));
|
||||
atomic_dec(&per_cpu(siw_use_cnt, cpu));
|
||||
}
|
||||
|
||||
static struct ib_qp *siw_get_base_qp(struct ib_device *base_dev, int id)
|
||||
|
|
|
@ -1183,12 +1183,12 @@ struct tx_task_t {
|
|||
wait_queue_head_t waiting;
|
||||
};
|
||||
|
||||
static DEFINE_PER_CPU(struct tx_task_t, tx_task_g);
|
||||
static DEFINE_PER_CPU(struct tx_task_t, siw_tx_task_g);
|
||||
|
||||
void siw_stop_tx_thread(int nr_cpu)
|
||||
{
|
||||
kthread_stop(siw_tx_thread[nr_cpu]);
|
||||
wake_up(&per_cpu(tx_task_g, nr_cpu).waiting);
|
||||
wake_up(&per_cpu(siw_tx_task_g, nr_cpu).waiting);
|
||||
}
|
||||
|
||||
int siw_run_sq(void *data)
|
||||
|
@ -1196,7 +1196,7 @@ int siw_run_sq(void *data)
|
|||
const int nr_cpu = (unsigned int)(long)data;
|
||||
struct llist_node *active;
|
||||
struct siw_qp *qp;
|
||||
struct tx_task_t *tx_task = &per_cpu(tx_task_g, nr_cpu);
|
||||
struct tx_task_t *tx_task = &per_cpu(siw_tx_task_g, nr_cpu);
|
||||
|
||||
init_llist_head(&tx_task->active);
|
||||
init_waitqueue_head(&tx_task->waiting);
|
||||
|
@ -1261,9 +1261,9 @@ int siw_sq_start(struct siw_qp *qp)
|
|||
}
|
||||
siw_qp_get(qp);
|
||||
|
||||
llist_add(&qp->tx_list, &per_cpu(tx_task_g, qp->tx_cpu).active);
|
||||
llist_add(&qp->tx_list, &per_cpu(siw_tx_task_g, qp->tx_cpu).active);
|
||||
|
||||
wake_up(&per_cpu(tx_task_g, qp->tx_cpu).waiting);
|
||||
wake_up(&per_cpu(siw_tx_task_g, qp->tx_cpu).waiting);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue