
Re: [Xen-devel] [RFC PATCH] mm, oom: distinguish blockable mode for mmu notifiers



On Fri 22-06-18 17:13:02, Christian König wrote:
> Hi Michal,
> 
> [Adding Felix as well]
> 
> Well, first of all, you have a misconception about why at least the AMD graphics
> driver needs to be able to sleep in an MMU notifier: we need to sleep because
> we need to wait for hardware operations to finish, *NOT* because we need
> to wait for locks.
> 
> I'm not sure if your flag now means that you generally can't sleep in MMU
> notifiers any more, but if that's the case, at least AMD hardware will break
> badly. In our case, the approach of waiting for a short time for the process
> to be reaped and then selecting another victim actually sounds like the right
> thing to do.

Well, I do not need to make the notifier code non-blocking all the time.
All I need is to ensure that it does not sleep when the flag says so and
returns -EAGAIN instead.
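
To make the contract concrete, here is a rough sketch of how the notifier
core could drive such a callback. This is only an illustration of the idea;
the loop follows the current mm/mmu_notifier.c layout, but the exact
signature with the blockable argument is an assumption based on this RFC,
not settled API:

static int __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
						 unsigned long start,
						 unsigned long end,
						 bool blockable)
{
	struct mmu_notifier *mn;
	int ret = 0;
	int id;

	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
		if (!mn->ops->invalidate_range_start)
			continue;

		/* the callback may only fail when blockable == false */
		ret = mn->ops->invalidate_range_start(mn, mm, start, end,
						      blockable);
		if (ret) {
			WARN_ON(blockable);
			break;
		}
	}
	srcu_read_unlock(&srcu, id);

	return ret;
}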

So here is what I do for amdgpu:

> > diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
> > index 83e344fbb50a..d138a526feff 100644
> > --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
> > +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
> > @@ -136,12 +136,18 @@ void amdgpu_mn_unlock(struct amdgpu_mn *mn)
> >    *
> >    * Take the rmn read side lock.
> >    */
> > -static void amdgpu_mn_read_lock(struct amdgpu_mn *rmn)
> > +static int amdgpu_mn_read_lock(struct amdgpu_mn *rmn, bool blockable)
> >   {
> > -   mutex_lock(&rmn->read_lock);
> > +   if (blockable)
> > +           mutex_lock(&rmn->read_lock);
> > +   else if (!mutex_trylock(&rmn->read_lock))
> > +           return -EAGAIN;
> > +
> >     if (atomic_inc_return(&rmn->recursion) == 1)
> >             down_read_non_owner(&rmn->lock);
> >     mutex_unlock(&rmn->read_lock);
> > +
> > +   return 0;
> >   }
> >   /**
> > @@ -197,10 +203,11 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
> >    * We block for all BOs between start and end to be idle and
> >    * unmap them by move them into system domain again.
> >    */
> > -static void amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
> > +static int amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
> >                                              struct mm_struct *mm,
> >                                              unsigned long start,
> > -                                            unsigned long end)
> > +                                            unsigned long end,
> > +                                            bool blockable)
> >   {
> >     struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
> >     struct interval_tree_node *it;
> > @@ -208,7 +215,11 @@ static void amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
> >     /* notification is exclusive, but interval is inclusive */
> >     end -= 1;
> > -   amdgpu_mn_read_lock(rmn);
> > +   /* TODO we should be able to split locking for interval tree and
> > +    * amdgpu_mn_invalidate_node
> > +    */
> > +   if (amdgpu_mn_read_lock(rmn, blockable))
> > +           return -EAGAIN;
> >     it = interval_tree_iter_first(&rmn->objects, start, end);
> >     while (it) {
> > @@ -219,6 +230,8 @@ static void amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
> >             amdgpu_mn_invalidate_node(node, start, end);
> >     }
> > +
> > +   return 0;
> >   }
> >   /**
> > @@ -233,10 +246,11 @@ static void amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
> >    * necessitates evicting all user-mode queues of the process. The BOs
> >    * are restorted in amdgpu_mn_invalidate_range_end_hsa.
> >    */
> > -static void amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
> > +static int amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
> >                                              struct mm_struct *mm,
> >                                              unsigned long start,
> > -                                            unsigned long end)
> > +                                            unsigned long end,
> > +                                            bool blockable)
> >   {
> >     struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
> >     struct interval_tree_node *it;
> > @@ -244,7 +258,8 @@ static void amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
> >     /* notification is exclusive, but interval is inclusive */
> >     end -= 1;
> > -   amdgpu_mn_read_lock(rmn);
> > +   if (amdgpu_mn_read_lock(rmn, blockable))
> > +           return -EAGAIN;
> >     it = interval_tree_iter_first(&rmn->objects, start, end);
> >     while (it) {
> > @@ -262,6 +277,8 @@ static void amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
> >                             amdgpu_amdkfd_evict_userptr(mem, mm);
> >             }
> >     }
> > +
> > +   return 0;
> >   }
> >   /**
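
On the consumer side, the idea is that the oom reaper passes
blockable == false and simply backs off from the mm as soon as any notifier
returns -EAGAIN, so that it can retry later or move on to another victim,
along the lines Christian describes above. Roughly like this; note this is
only a sketch of the direction, not the actual patch, and the _nonblock
wrapper name below is purely illustrative:

static inline int
mmu_notifier_invalidate_range_start_nonblock(struct mm_struct *mm,
					     unsigned long start,
					     unsigned long end)
{
	if (mm_has_notifiers(mm))
		return __mmu_notifier_invalidate_range_start(mm, start, end,
							     false);
	return 0;
}

static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	bool ret = true;

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		struct mmu_gather tlb;

		if (!can_madv_dontneed_vma(vma))
			continue;

		tlb_gather_mmu(&tlb, mm, vma->vm_start, vma->vm_end);
		if (mmu_notifier_invalidate_range_start_nonblock(mm,
					vma->vm_start, vma->vm_end)) {
			/*
			 * Some notifier cannot make progress without
			 * sleeping; give up on this mm for now so the
			 * oom killer can retry or pick another victim.
			 */
			tlb_finish_mmu(&tlb, vma->vm_start, vma->vm_end);
			ret = false;
			break;
		}
		unmap_page_range(&tlb, vma, vma->vm_start, vma->vm_end, NULL);
		mmu_notifier_invalidate_range_end(mm, vma->vm_start,
						  vma->vm_end);
		tlb_finish_mmu(&tlb, vma->vm_start, vma->vm_end);
	}

	return ret;
}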
-- 
Michal Hocko
SUSE Labs

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel

 

