hmm: use mmu_notifier_get/put for 'struct hmm'
This is a significant simplification, it eliminates all the remaining 'hmm' stuff in mm_struct, eliminates krefing along the critical notifier paths, and takes away all the ugly locking and abuse of page_table_lock. mmu_notifier_get() provides the single struct hmm per struct mm which eliminates mm->hmm. It also directly guarantees that no mmu_notifier op callback is callable while concurrent free is possible, this eliminates all the krefs inside the mmu_notifier callbacks. The remaining krefs in the range code were overly cautious, drivers are already not permitted to free the mirror while a range exists. Link: https://lore.kernel.org/r/20190806231548.25242-6-jgg@ziepe.ca Reviewed-by: Christoph Hellwig <hch@lst.de> Reviewed-by: Ralph Campbell <rcampbell@nvidia.com> Tested-by: Ralph Campbell <rcampbell@nvidia.com> Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
This commit is contained in:
@@ -84,15 +84,12 @@
  * @notifiers: count of active mmu notifiers
  */
 struct hmm {
-	struct mm_struct	*mm;
-	struct kref		kref;
+	struct mmu_notifier	mmu_notifier;
 	spinlock_t		ranges_lock;
 	struct list_head	ranges;
 	struct list_head	mirrors;
-	struct mmu_notifier	mmu_notifier;
 	struct rw_semaphore	mirrors_sem;
 	wait_queue_head_t	wq;
-	struct rcu_head		rcu;
 	long			notifiers;
 };
 
@@ -409,13 +406,6 @@ long hmm_range_dma_unmap(struct hmm_range *range,
  */
 #define HMM_RANGE_DEFAULT_TIMEOUT 1000
 
-/* Below are for HMM internal use only! Not to be used by device driver! */
-static inline void hmm_mm_init(struct mm_struct *mm)
-{
-	mm->hmm = NULL;
-}
 #else /* IS_ENABLED(CONFIG_HMM_MIRROR) */
-static inline void hmm_mm_init(struct mm_struct *mm) {}
 #endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */
-
 #endif /* LINUX_HMM_H */
Reference in New Issue
Block a user