mirror of https://gitee.com/openkylin/linux.git
IB/hfi1: Specify mm when releasing pages
This change adds a pointer to the process mm_struct when calling hfi1_release_user_pages(). Previously, the function used the mm_struct of the current process to adjust the number of pinned pages. However, in some cases, namely when unpinning pages due to an MMU notifier call, we do not want to drop into that code block, as doing so would cause a deadlock (the MMU notifiers take the process's mmap_sem prior to calling the callbacks). By allowing the caller to specify the pointer to the mm_struct, the caller gains finer control over that part of hfi1_release_user_pages().

Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Reviewed-by: Dean Luick <dean.luick@intel.com>
Signed-off-by: Mitko Haralanov <mitko.haralanov@intel.com>
Signed-off-by: Jubin John <jubin.john@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
commit bd3a8947de
parent 2c97ce4f3c
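In short, hfi1_release_user_pages() now takes the mm_struct to charge explicitly; process-context callers pass current->mm, while the MMU-notifier path passes NULL and adjusts the pinned-page count itself, because the notifier already holds mmap_sem and re-taking it inside the release path would deadlock. The following is a minimal userspace sketch of that locking pattern only, not the driver code: fake_mm, release_pages(), and notifier_remove() are made-up stand-ins for mm_struct, hfi1_release_user_pages(), and the sdma_rb_remove() notifier callback, with a pthread mutex standing in for mmap_sem.

/*
 * Userspace analogue (not the hfi1 code) of the pattern this commit
 * introduces: the caller passes the mm-like context explicitly, and the
 * notifier-style caller passes NULL because it already holds the lock
 * that release_pages() would otherwise take.
 */
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct fake_mm {                        /* stand-in for struct mm_struct */
        pthread_mutex_t mmap_sem;       /* stand-in for mm->mmap_sem */
        size_t pinned_vm;               /* stand-in for mm->pinned_vm */
};

/*
 * Analogue of hfi1_release_user_pages(): adjust the pin count only when
 * the caller hands us an mm; NULL means "the caller accounts for the
 * pages itself" (the notifier path, where the lock is already held).
 */
static void release_pages(struct fake_mm *mm, size_t npages)
{
        if (mm) {
                pthread_mutex_lock(&mm->mmap_sem);
                mm->pinned_vm -= npages;
                pthread_mutex_unlock(&mm->mmap_sem);
        }
}

/*
 * Analogue of the sdma_rb_remove() notifier path: the lock is already
 * held, so pass NULL and fix up the counter directly.
 */
static void notifier_remove(struct fake_mm *mm, size_t npages)
{
        pthread_mutex_lock(&mm->mmap_sem);   /* the "notifier" holds the lock */
        release_pages(NULL, npages);         /* must not retake the lock */
        mm->pinned_vm -= npages;             /* adjust the count ourselves */
        pthread_mutex_unlock(&mm->mmap_sem);
}

int main(void)
{
        static struct fake_mm mm = { PTHREAD_MUTEX_INITIALIZER, 16 };

        release_pages(&mm, 4);     /* process context: mm passed explicitly */
        notifier_remove(&mm, 4);   /* notifier context: NULL mm, no deadlock */
        printf("pinned_vm = %zu\n", mm.pinned_vm);   /* prints 8 */
        return 0;
}

Built with cc -pthread, the sketch prints pinned_vm = 8; passing &mm instead of NULL from notifier_remove() would model the self-deadlock on mmap_sem that the commit avoids.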
@@ -1666,7 +1666,7 @@ void shutdown_led_override(struct hfi1_pportdata *ppd);
 
 bool hfi1_can_pin_pages(struct hfi1_devdata *, u32, u32);
 int hfi1_acquire_user_pages(unsigned long, size_t, bool, struct page **);
-void hfi1_release_user_pages(struct page **, size_t, bool);
+void hfi1_release_user_pages(struct mm_struct *, struct page **, size_t, bool);
 
 static inline void clear_rcvhdrtail(const struct hfi1_ctxtdata *rcd)
 {

@@ -550,7 +550,7 @@ int hfi1_user_exp_rcv_setup(struct file *fp, struct hfi1_tid_info *tinfo)
         * for example), unpin all unmapped pages so we can pin them nex time.
         */
        if (mapped_pages != pinned)
-               hfi1_release_user_pages(&pages[mapped_pages],
+               hfi1_release_user_pages(current->mm, &pages[mapped_pages],
                                        pinned - mapped_pages,
                                        false);
 bail:

@@ -923,7 +923,7 @@ static void clear_tid_node(struct hfi1_filedata *fd, u16 subctxt,
 
        pci_unmap_single(dd->pcidev, node->dma_addr, node->mmu.len,
                         PCI_DMA_FROMDEVICE);
-       hfi1_release_user_pages(node->pages, node->npages, true);
+       hfi1_release_user_pages(current->mm, node->pages, node->npages, true);
 
        node->grp->used--;
        node->grp->map &= ~(1 << (node->rcventry - node->grp->base));

@@ -116,7 +116,8 @@ int hfi1_acquire_user_pages(unsigned long vaddr, size_t npages, bool writable,
        return ret;
 }
 
-void hfi1_release_user_pages(struct page **p, size_t npages, bool dirty)
+void hfi1_release_user_pages(struct mm_struct *mm, struct page **p,
+                            size_t npages, bool dirty)
 {
        size_t i;
 

@@ -126,9 +127,9 @@ void hfi1_release_user_pages(struct page **p, size_t npages, bool dirty)
                put_page(p[i]);
        }
 
-       if (current->mm) { /* during close after signal, mm can be NULL */
-               down_write(&current->mm->mmap_sem);
-               current->mm->pinned_vm -= npages;
-               up_write(&current->mm->mmap_sem);
+       if (mm) { /* during close after signal, mm can be NULL */
+               down_write(&mm->mmap_sem);
+               mm->pinned_vm -= npages;
+               up_write(&mm->mmap_sem);
        }
 }

@@ -277,7 +277,7 @@ static inline void pq_update(struct hfi1_user_sdma_pkt_q *);
 static void user_sdma_free_request(struct user_sdma_request *, bool);
 static int pin_vector_pages(struct user_sdma_request *,
                            struct user_sdma_iovec *);
-static void unpin_vector_pages(struct page **, unsigned);
+static void unpin_vector_pages(struct mm_struct *, struct page **, unsigned);
 static int check_header_template(struct user_sdma_request *,
                                 struct hfi1_pkt_header *, u32, u32);
 static int set_txreq_header(struct user_sdma_request *,

@@ -1072,7 +1072,7 @@ static int pin_vector_pages(struct user_sdma_request *req,
                goto bail;
        }
        if (pinned != npages) {
-               unpin_vector_pages(pages, pinned);
+               unpin_vector_pages(current->mm, pages, pinned);
                ret = -EFAULT;
                goto bail;
        }

@@ -1097,9 +1097,10 @@ static int pin_vector_pages(struct user_sdma_request *req,
        return ret;
 }
 
-static void unpin_vector_pages(struct page **pages, unsigned npages)
+static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
+                              unsigned npages)
 {
-       hfi1_release_user_pages(pages, npages, 0);
+       hfi1_release_user_pages(mm, pages, npages, 0);
        kfree(pages);
 }
 

@@ -1502,8 +1503,14 @@ static void sdma_rb_remove(struct rb_root *root, struct mmu_rb_node *mnode,
        struct sdma_mmu_node *node =
                container_of(mnode, struct sdma_mmu_node, rb);
 
-       if (!notifier)
-               unpin_vector_pages(node->pages, node->npages);
+       unpin_vector_pages(notifier ? NULL : current->mm, node->pages,
+                          node->npages);
+       /*
+        * If called by the MMU notifier, we have to adjust the pinned
+        * page count ourselves.
+        */
+       if (notifier)
+               current->mm->pinned_vm -= node->npages;
        kfree(node);
 }
 