drivers/IB,usnic: reduce scope of mmap_sem

usnic_uiom_get_pages() uses gup_longterm(), so we cannot really get rid of
mmap_sem altogether in the driver, but we can get rid of some of the
complexity that holding mmap_sem only for pinned_vm accounting brings.  We
can remove the workqueue altogether, as we no longer need to defer work to
unpin pages now that the counter is atomic.  We also take the lock in
shared (read) mode instead of exclusively.

Acked-by: Parvi Kaustubhi <pkaustub@cisco.com>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
This commit is contained in:
Davidlohr Bueso 2019-02-06 09:59:19 -08:00 committed by Jason Gunthorpe
Parent 0e15c25336
Commit 8ea1f989aa
3 changed files: 6 additions and 55 deletions

View file

@ -684,7 +684,6 @@ out_unreg_netdev_notifier:
out_pci_unreg: out_pci_unreg:
pci_unregister_driver(&usnic_ib_pci_driver); pci_unregister_driver(&usnic_ib_pci_driver);
out_umem_fini: out_umem_fini:
usnic_uiom_fini();
return err; return err;
} }
@ -697,7 +696,6 @@ static void __exit usnic_ib_destroy(void)
unregister_inetaddr_notifier(&usnic_ib_inetaddr_notifier); unregister_inetaddr_notifier(&usnic_ib_inetaddr_notifier);
unregister_netdevice_notifier(&usnic_ib_netdevice_notifier); unregister_netdevice_notifier(&usnic_ib_netdevice_notifier);
pci_unregister_driver(&usnic_ib_pci_driver); pci_unregister_driver(&usnic_ib_pci_driver);
usnic_uiom_fini();
} }
MODULE_DESCRIPTION("Cisco VIC (usNIC) Verbs Driver"); MODULE_DESCRIPTION("Cisco VIC (usNIC) Verbs Driver");

View file

@ -47,8 +47,6 @@
#include "usnic_uiom.h" #include "usnic_uiom.h"
#include "usnic_uiom_interval_tree.h" #include "usnic_uiom_interval_tree.h"
static struct workqueue_struct *usnic_uiom_wq;
#define USNIC_UIOM_PAGE_CHUNK \ #define USNIC_UIOM_PAGE_CHUNK \
((PAGE_SIZE - offsetof(struct usnic_uiom_chunk, page_list)) /\ ((PAGE_SIZE - offsetof(struct usnic_uiom_chunk, page_list)) /\
((void *) &((struct usnic_uiom_chunk *) 0)->page_list[1] - \ ((void *) &((struct usnic_uiom_chunk *) 0)->page_list[1] - \
@ -127,9 +125,9 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
npages = PAGE_ALIGN(size + (addr & ~PAGE_MASK)) >> PAGE_SHIFT; npages = PAGE_ALIGN(size + (addr & ~PAGE_MASK)) >> PAGE_SHIFT;
uiomr->owning_mm = mm = current->mm; uiomr->owning_mm = mm = current->mm;
down_write(&mm->mmap_sem); down_read(&mm->mmap_sem);
locked = npages + atomic64_read(&current->mm->pinned_vm); locked = atomic64_add_return(npages, &current->mm->pinned_vm);
lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) { if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
@ -184,14 +182,13 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
} }
out: out:
if (ret < 0) if (ret < 0) {
usnic_uiom_put_pages(chunk_list, 0); usnic_uiom_put_pages(chunk_list, 0);
else { atomic64_sub(npages, &current->mm->pinned_vm);
atomic64_set(&mm->pinned_vm, locked); } else
mmgrab(uiomr->owning_mm); mmgrab(uiomr->owning_mm);
}
up_write(&mm->mmap_sem); up_read(&mm->mmap_sem);
free_page((unsigned long) page_list); free_page((unsigned long) page_list);
return ret; return ret;
} }
@ -435,43 +432,12 @@ static inline size_t usnic_uiom_num_pages(struct usnic_uiom_reg *uiomr)
return PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT; return PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
} }
static void usnic_uiom_release_defer(struct work_struct *work)
{
struct usnic_uiom_reg *uiomr =
container_of(work, struct usnic_uiom_reg, work);
down_write(&uiomr->owning_mm->mmap_sem);
atomic64_sub(usnic_uiom_num_pages(uiomr), &uiomr->owning_mm->pinned_vm);
up_write(&uiomr->owning_mm->mmap_sem);
__usnic_uiom_release_tail(uiomr);
}
void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr, void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr,
struct ib_ucontext *context) struct ib_ucontext *context)
{ {
__usnic_uiom_reg_release(uiomr->pd, uiomr, 1); __usnic_uiom_reg_release(uiomr->pd, uiomr, 1);
/*
* We may be called with the mm's mmap_sem already held. This
* can happen when a userspace munmap() is the call that drops
* the last reference to our file and calls our release
* method. If there are memory regions to destroy, we'll end
* up here and not be able to take the mmap_sem. In that case
* we defer the vm_locked accounting to a workqueue.
*/
if (context->closing) {
if (!down_write_trylock(&uiomr->owning_mm->mmap_sem)) {
INIT_WORK(&uiomr->work, usnic_uiom_release_defer);
queue_work(usnic_uiom_wq, &uiomr->work);
return;
}
} else {
down_write(&uiomr->owning_mm->mmap_sem);
}
atomic64_sub(usnic_uiom_num_pages(uiomr), &uiomr->owning_mm->pinned_vm); atomic64_sub(usnic_uiom_num_pages(uiomr), &uiomr->owning_mm->pinned_vm);
up_write(&uiomr->owning_mm->mmap_sem);
__usnic_uiom_release_tail(uiomr); __usnic_uiom_release_tail(uiomr);
} }
@ -600,17 +566,5 @@ int usnic_uiom_init(char *drv_name)
return -EPERM; return -EPERM;
} }
usnic_uiom_wq = create_workqueue(drv_name);
if (!usnic_uiom_wq) {
usnic_err("Unable to alloc wq for drv %s\n", drv_name);
return -ENOMEM;
}
return 0; return 0;
} }
void usnic_uiom_fini(void)
{
flush_workqueue(usnic_uiom_wq);
destroy_workqueue(usnic_uiom_wq);
}

View file

@ -93,5 +93,4 @@ struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd,
void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr, void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr,
struct ib_ucontext *ucontext); struct ib_ucontext *ucontext);
int usnic_uiom_init(char *drv_name); int usnic_uiom_init(char *drv_name);
void usnic_uiom_fini(void);
#endif /* USNIC_UIOM_H_ */ #endif /* USNIC_UIOM_H_ */