mirror of https://gitee.com/openkylin/linux.git
Pull request for 4.20-rc
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Doug Ledford:

"We have 5 small fixes for this pull request. One is a performance regression, so not necessarily strictly a fix, but it was small and reasonable and claimed to avoid thrashing in the scheduler, so I took it. The remaining are all legitimate fixes that match the "we take fixes any time" criteria.

Summary:

- One performance regression for hfi1
- One kasan fix for hfi1
- A couple mlx5 fixes
- A core oops fix"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  IB/core: Fix oops in netdev_next_upper_dev_rcu()
  IB/mlx5: Block DEVX umem from the non applicable cases
  IB/mlx5: Fix implicit ODP interrupted page fault
  IB/hfi1: Fix an out-of-bounds access in get_hw_stats
  IB/hfi1: Fix a latency issue for small messages
commit e10db791bf
@@ -267,6 +267,9 @@ is_upper_ndev_bond_master_filter(struct ib_device *ib_dev, u8 port,
 	struct net_device *cookie_ndev = cookie;
 	bool match = false;
 
+	if (!rdma_ndev)
+		return false;
+
 	rcu_read_lock();
 	if (netif_is_bond_master(cookie_ndev) &&
 	    rdma_is_upper_dev_rcu(rdma_ndev, cookie_ndev))
@@ -12500,7 +12500,8 @@ static int init_cntrs(struct hfi1_devdata *dd)
 	}
 
 	/* allocate space for the counter values */
-	dd->cntrs = kcalloc(dd->ndevcntrs, sizeof(u64), GFP_KERNEL);
+	dd->cntrs = kcalloc(dd->ndevcntrs + num_driver_cntrs, sizeof(u64),
+			    GFP_KERNEL);
 	if (!dd->cntrs)
 		goto bail;
 
@@ -155,6 +155,8 @@ struct hfi1_ib_stats {
 extern struct hfi1_ib_stats hfi1_stats;
 extern const struct pci_error_handlers hfi1_pci_err_handler;
 
+extern int num_driver_cntrs;
+
 /*
  * First-cut criterion for "device is active" is
  * two thousand dwords combined Tx, Rx traffic per
@@ -340,6 +340,13 @@ int hfi1_setup_wqe(struct rvt_qp *qp, struct rvt_swqe *wqe, bool *call_send)
 	default:
 		break;
 	}
+
+	/*
+	 * System latency between send and schedule is large enough that
+	 * forcing call_send to true for piothreshold packets is necessary.
+	 */
+	if (wqe->length <= piothreshold)
+		*call_send = true;
 	return 0;
 }
 
@@ -1479,7 +1479,7 @@ static const char * const driver_cntr_names[] = {
 static DEFINE_MUTEX(cntr_names_lock); /* protects the *_cntr_names bufers */
 static const char **dev_cntr_names;
 static const char **port_cntr_names;
-static int num_driver_cntrs = ARRAY_SIZE(driver_cntr_names);
+int num_driver_cntrs = ARRAY_SIZE(driver_cntr_names);
 static int num_dev_cntrs;
 static int num_port_cntrs;
 static int cntr_names_initialized;
@@ -1066,7 +1066,9 @@ static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
 
 	err = uverbs_get_flags32(&access, attrs,
 				 MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
-				 IB_ACCESS_SUPPORTED);
+				 IB_ACCESS_LOCAL_WRITE |
+				 IB_ACCESS_REMOTE_WRITE |
+				 IB_ACCESS_REMOTE_READ);
 	if (err)
 		return err;
 
@@ -506,14 +506,13 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
 static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
 			u64 io_virt, size_t bcnt, u32 *bytes_mapped)
 {
+	int npages = 0, current_seq, page_shift, ret, np;
+	bool implicit = false;
 	struct ib_umem_odp *odp_mr = to_ib_umem_odp(mr->umem);
 	u64 access_mask = ODP_READ_ALLOWED_BIT;
-	int npages = 0, page_shift, np;
 	u64 start_idx, page_mask;
 	struct ib_umem_odp *odp;
-	int current_seq;
 	size_t size;
-	int ret;
 
 	if (!odp_mr->page_list) {
 		odp = implicit_mr_get_data(mr, io_virt, bcnt);
@@ -521,7 +520,7 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
 		if (IS_ERR(odp))
 			return PTR_ERR(odp);
 		mr = odp->private;
+		implicit = true;
 	} else {
 		odp = odp_mr;
 	}
@@ -600,7 +599,7 @@ static int pagefault_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
 
 out:
 	if (ret == -EAGAIN) {
-		if (mr->parent || !odp->dying) {
+		if (implicit || !odp->dying) {
 			unsigned long timeout =
 				msecs_to_jiffies(MMU_NOTIFIER_TIMEOUT);
 