mlx5: use RCU lock in mlx5_eq_cq_get()
mlx5_eq_cq_get() is called in the IRQ handler, and the spinlock inside it gets a lot of contention when we test a heavy workload with 60 RX queues and 80 CPUs; this shows up clearly in the flame graph. In fact, radix_tree_lookup() is perfectly fine under the RCU read lock, so we don't have to take a spinlock on this hot path. This is pretty much similar to commit 291c566a28 ("net/mlx4_core: Fix racy CQ (Completion Queue) free"). Slow paths are still serialized with the spinlock, and with synchronize_irq() it should be safe to just move the fast path to the RCU read lock.

This patch itself reduces the latency by about 50% for our memcached workload on the 4.14 kernel we test with. In upstream, as pointed out by Saeed, this spinlock got some rework in commit 02d92f7903 ("net/mlx5: CQ Database per EQ"), so the difference could be smaller there.

Cc: Saeed Mahameed <saeedm@mellanox.com>
Cc: Tariq Toukan <tariqt@mellanox.com>
Acked-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent 46861e3e88
commit 1fbf1252df
@@ -114,11 +114,11 @@ static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn)
 	struct mlx5_cq_table *table = &eq->cq_table;
 	struct mlx5_core_cq *cq = NULL;
 
-	spin_lock(&table->lock);
+	rcu_read_lock();
 	cq = radix_tree_lookup(&table->tree, cqn);
 	if (likely(cq))
 		mlx5_cq_hold(cq);
-	spin_unlock(&table->lock);
+	rcu_read_unlock();
 
 	return cq;
 }
@@ -371,9 +371,9 @@ int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
 	struct mlx5_cq_table *table = &eq->cq_table;
 	int err;
 
-	spin_lock_irq(&table->lock);
+	spin_lock(&table->lock);
 	err = radix_tree_insert(&table->tree, cq->cqn, cq);
-	spin_unlock_irq(&table->lock);
+	spin_unlock(&table->lock);
 
 	return err;
 }
@@ -383,9 +383,9 @@ int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
 	struct mlx5_cq_table *table = &eq->cq_table;
 	struct mlx5_core_cq *tmp;
 
-	spin_lock_irq(&table->lock);
+	spin_lock(&table->lock);
 	tmp = radix_tree_delete(&table->tree, cq->cqn);
-	spin_unlock_irq(&table->lock);
+	spin_unlock(&table->lock);
 
 	if (!tmp) {
 		mlx5_core_warn(eq->dev, "cq 0x%x not found in eq 0x%x tree\n", eq->eqn, cq->cqn);
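For context on why the RCU read side in the first hunk is safe, below is a minimal sketch of the teardown ordering the commit message relies on (remove from the tree under the spinlock, then synchronize_irq(), then release). It is illustrative only: the function name example_eq_destroy_cq is made up, and it assumes an irqn field on the EQ and an mlx5_cq_put() counterpart to mlx5_cq_hold(); it is not the driver's actual destroy path.

/* Sketch only; assumes the driver's internal eq/cq definitions are in scope. */
static void example_eq_destroy_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
{
	struct mlx5_cq_table *table = &eq->cq_table;

	/* Unpublish the CQ; writers still serialize against each other on the spinlock. */
	spin_lock(&table->lock);
	radix_tree_delete(&table->tree, cq->cqn);
	spin_unlock(&table->lock);

	/*
	 * Wait for any EQ interrupt handler that may still be inside the
	 * rcu_read_lock()ed lookup in mlx5_eq_cq_get() (irqn is assumed).
	 */
	synchronize_irq(eq->irqn);

	/* No reader can find the CQ anymore; drop the table's reference (assumed helper). */
	mlx5_cq_put(cq);
}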