LCOV - code coverage report
Current view: top level - drivers/gpu/drm/i915/gem - i915_gem_shrinker.c (source / functions)
Test: combined.info        Date: 2022-04-01 13:59:58

                  Hit    Total    Coverage
Lines:              0      196       0.0 %
Functions:          0       16       0.0 %
Branches:           0      121       0.0 %

/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008-2015 Intel Corporation
 */

#include <linux/oom.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/vmalloc.h>
#include <drm/i915_drm.h>

#include "i915_trace.h"

static bool swap_available(void)
{
        return get_nr_swap_pages() > 0;
}

static bool can_release_pages(struct drm_i915_gem_object *obj)
{
        /* Consider only shrinkable objects. */
        if (!i915_gem_object_is_shrinkable(obj))
                return false;

        /*
         * Only report true if by unbinding the object and putting its pages
         * we can actually make forward progress towards freeing physical
         * pages.
         *
         * If the pages are pinned for any reason other than being bound
         * to the GPU, simply unbinding from the GPU is not going to succeed
         * in releasing our pin count on the pages themselves.
         */
        if (atomic_read(&obj->mm.pages_pin_count) > atomic_read(&obj->bind_count))
                return false;

        /*
         * We can only return physical pages to the system if we can either
         * discard the contents (because the user has marked them as being
         * purgeable) or if we can move their contents out to swap.
         */
        return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
}
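
/*
 * Illustration of the pin test above (hypothetical counts, not taken from
 * this driver): every GPU binding contributes one page pin, so any surplus
 * pin means unbinding alone cannot release the pages.
 *
 *   pages_pin_count == 1, bind_count == 1: only the binding pins the pages,
 *                                          so unbinding can free them.
 *   pages_pin_count == 2, bind_count == 1: an extra pin (e.g. a pinned CPU
 *                                          map) survives unbinding, so
 *                                          can_release_pages() reports false.
 */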

static bool unsafe_drop_pages(struct drm_i915_gem_object *obj,
                              unsigned long shrink)
{
        unsigned long flags;

        flags = 0;
        if (shrink & I915_SHRINK_ACTIVE)
                flags = I915_GEM_OBJECT_UNBIND_ACTIVE;

        if (i915_gem_object_unbind(obj, flags) == 0)
                __i915_gem_object_put_pages(obj);

        return !i915_gem_object_has_pages(obj);
}

static void try_to_writeback(struct drm_i915_gem_object *obj,
                             unsigned int flags)
{
        switch (obj->mm.madv) {
        case I915_MADV_DONTNEED:
                i915_gem_object_truncate(obj);
                /* fall through */
        case __I915_MADV_PURGED:
                return;
        }

        if (flags & I915_SHRINK_WRITEBACK)
                i915_gem_object_writeback(obj);
}
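
/*
 * Summary of the madv handling above (the __I915_MADV_PURGED transition
 * happens inside i915_gem_object_truncate() in this era of the driver):
 *
 *   I915_MADV_DONTNEED -> truncate now; the object becomes __I915_MADV_PURGED
 *   __I915_MADV_PURGED -> contents already discarded, nothing left to do
 *   I915_MADV_WILLNEED -> fall out of the switch and, if the caller passed
 *                         I915_SHRINK_WRITEBACK, start writing the pages
 *                         out to swap
 */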

/**
 * i915_gem_shrink - Shrink buffer object caches
 * @i915: i915 device
 * @target: amount of memory to make available, in pages
 * @nr_scanned: optional output for number of pages scanned (incremental)
 * @shrink: control flags for selecting cache types
 *
 * This function is the main interface to the shrinker. It will try to release
 * up to @target pages of main memory backing storage from buffer objects.
 * Selection of the specific caches can be done with @shrink. This is e.g.
 * useful when purgeable objects should be removed from caches preferentially.
 *
 * Note that it's not guaranteed that the released amount is actually available
 * as free system memory - the pages might still be in use due to other reasons
 * (like cpu mmaps) or the mm core may have reused them before we could grab
 * them. Therefore code that needs to explicitly shrink buffer object caches
 * (e.g. to avoid deadlocks in memory reclaim) must fall back to
 * i915_gem_shrink_all().
 *
 * Also note that any kind of pinning (both per-vma address space pins and
 * backing storage pins at the buffer object level) results in the shrinker
 * code having to skip the object.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long
i915_gem_shrink(struct drm_i915_private *i915,
                unsigned long target,
                unsigned long *nr_scanned,
                unsigned int shrink)
{
        const struct {
                struct list_head *list;
                unsigned int bit;
        } phases[] = {
                { &i915->mm.purge_list, ~0u },
                {
                        &i915->mm.shrink_list,
                        I915_SHRINK_BOUND | I915_SHRINK_UNBOUND
                },
                { NULL, 0 },
        }, *phase;
        intel_wakeref_t wakeref = 0;
        unsigned long count = 0;
        unsigned long scanned = 0;

        /*
         * When shrinking the active list, we should also consider active
         * contexts. Active contexts are pinned until they are retired, and
         * so cannot be simply unbound to retire and unpin their pages. To
         * shrink the contexts, we must wait until the gpu is idle and
         * completed its switch to the kernel context. In short, we do
         * not have a good mechanism for idling a specific context.
         */

        trace_i915_gem_shrink(i915, target, shrink);

        /*
         * Unbinding of objects will require HW access; let us not wake the
         * device just to recover a little memory. If absolutely necessary,
         * we will force the wake during oom-notifier.
         */
        if (shrink & I915_SHRINK_BOUND) {
                wakeref = intel_runtime_pm_get_if_in_use(&i915->runtime_pm);
                if (!wakeref)
                        shrink &= ~I915_SHRINK_BOUND;
        }

        /*
         * As we may completely rewrite the (un)bound list whilst unbinding
         * (due to retiring requests) we have to strictly process only
         * one element of the list at a time, and recheck the list
         * on every iteration.
         *
         * In particular, we must hold a reference whilst removing the
         * object as we may end up waiting for and/or retiring the objects.
         * This might release the final reference (held by the active list)
         * and result in the object being freed from under us. This is
         * similar to the precautions the eviction code must take whilst
         * removing objects.
         *
         * Also note that although these lists do not hold a reference to
         * the object we can safely grab one here: The final object
         * unreferencing and the bound_list are both protected by the
         * dev->struct_mutex and so we won't ever be able to observe an
         * object on the bound_list with a reference count equal to 0.
         */
        for (phase = phases; phase->list; phase++) {
                struct list_head still_in_list;
                struct drm_i915_gem_object *obj;
                unsigned long flags;

                if ((shrink & phase->bit) == 0)
                        continue;

                INIT_LIST_HEAD(&still_in_list);

                /*
                 * We serialize our access to unreferenced objects through
                 * the use of the struct_mutex. While the objects are not
                 * yet freed (due to RCU then a workqueue) we still want
                 * to be able to shrink their pages, so they remain on
                 * the unbound/bound list until actually freed.
                 */
                spin_lock_irqsave(&i915->mm.obj_lock, flags);
                while (count < target &&
                       (obj = list_first_entry_or_null(phase->list,
                                                       typeof(*obj),
                                                       mm.link))) {
                        list_move_tail(&obj->mm.link, &still_in_list);

                        if (shrink & I915_SHRINK_VMAPS &&
                            !is_vmalloc_addr(obj->mm.mapping))
                                continue;

                        if (!(shrink & I915_SHRINK_ACTIVE) &&
                            i915_gem_object_is_framebuffer(obj))
                                continue;

                        if (!(shrink & I915_SHRINK_BOUND) &&
                            atomic_read(&obj->bind_count))
                                continue;

                        if (!can_release_pages(obj))
                                continue;

                        if (!kref_get_unless_zero(&obj->base.refcount))
                                continue;

                        spin_unlock_irqrestore(&i915->mm.obj_lock, flags);

                        if (unsafe_drop_pages(obj, shrink)) {
                                /* May arrive from get_pages on another bo */
                                mutex_lock(&obj->mm.lock);
                                if (!i915_gem_object_has_pages(obj)) {
                                        try_to_writeback(obj, shrink);
                                        count += obj->base.size >> PAGE_SHIFT;
                                }
                                mutex_unlock(&obj->mm.lock);
                        }

                        scanned += obj->base.size >> PAGE_SHIFT;
                        i915_gem_object_put(obj);

                        spin_lock_irqsave(&i915->mm.obj_lock, flags);
                }
                list_splice_tail(&still_in_list, phase->list);
                spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
        }

        if (shrink & I915_SHRINK_BOUND)
                intel_runtime_pm_put(&i915->runtime_pm, wakeref);

        if (nr_scanned)
                *nr_scanned += scanned;
        return count;
}
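
/*
 * Usage sketch (hypothetical caller, not part of this file): recover a
 * modest number of pages, starting with the cheapest flags and escalating
 * only if the first pass comes up short.
 */
static unsigned long example_trim(struct drm_i915_private *i915)
{
        unsigned long nr_scanned = 0;
        unsigned long freed;

        /* Unbound objects only: no runtime-pm wakeup required. */
        freed = i915_gem_shrink(i915, 128, &nr_scanned, I915_SHRINK_UNBOUND);
        if (freed >= 128)
                return freed;

        /* Escalate: also unbind and reclaim idle GPU-bound objects. */
        freed += i915_gem_shrink(i915, 128 - freed, &nr_scanned,
                                 I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);
        return freed;
}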

/**
 * i915_gem_shrink_all - Shrink buffer object caches completely
 * @i915: i915 device
 *
 * This is a simple wrapper around i915_gem_shrink() to aggressively shrink all
 * caches completely. It also first waits for and retires all outstanding
 * requests to also be able to release backing storage for active objects.
 *
 * This should only be used in code to intentionally quiesce the gpu or as a
 * last-ditch effort when memory seems to have run out.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long i915_gem_shrink_all(struct drm_i915_private *i915)
{
        intel_wakeref_t wakeref;
        unsigned long freed = 0;

        with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
                freed = i915_gem_shrink(i915, -1UL, NULL,
                                        I915_SHRINK_BOUND |
                                        I915_SHRINK_UNBOUND);
        }

        return freed;
}

static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
        struct drm_i915_private *i915 =
                container_of(shrinker, struct drm_i915_private, mm.shrinker);
        unsigned long num_objects;
        unsigned long count;

        count = READ_ONCE(i915->mm.shrink_memory) >> PAGE_SHIFT;
        num_objects = READ_ONCE(i915->mm.shrink_count);

        /*
         * Update our preferred vmscan batch size for the next pass.
         * Our rough guess for an effective batch size is roughly 2
         * available GEM objects worth of pages. That is, we don't want
         * the shrinker to fire until it is worth the cost of freeing an
         * entire GEM object.
         */
        if (num_objects) {
                unsigned long avg = 2 * count / num_objects;

                i915->mm.shrinker.batch =
                        max((i915->mm.shrinker.batch + avg) >> 1,
                            128ul /* default SHRINK_BATCH */);
        }

        return count;
}
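
/*
 * A standalone sketch of the batch heuristic above, with hypothetical
 * inputs: 1024 reclaimable pages across 4 objects gives avg = 512, so a
 * previous batch of 4096 moves halfway down to (4096 + 512) >> 1 = 2304,
 * comfortably above the 128-page SHRINK_BATCH floor.
 */
static unsigned long example_batch(unsigned long prev_batch,
                                   unsigned long pages,
                                   unsigned long num_objects)
{
        /* roughly two objects' worth of pages per shrinker invocation */
        unsigned long avg = 2 * pages / num_objects;

        /* mean of the old batch and the new estimate, floored at 128 */
        return max((prev_batch + avg) >> 1, 128ul);
}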

static unsigned long
i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
        struct drm_i915_private *i915 =
                container_of(shrinker, struct drm_i915_private, mm.shrinker);
        unsigned long freed;

        sc->nr_scanned = 0;

        freed = i915_gem_shrink(i915,
                                sc->nr_to_scan,
                                &sc->nr_scanned,
                                I915_SHRINK_BOUND |
                                I915_SHRINK_UNBOUND);
        if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) {
                intel_wakeref_t wakeref;

                with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
                        freed += i915_gem_shrink(i915,
                                                 sc->nr_to_scan - sc->nr_scanned,
                                                 &sc->nr_scanned,
                                                 I915_SHRINK_ACTIVE |
                                                 I915_SHRINK_BOUND |
                                                 I915_SHRINK_UNBOUND |
                                                 I915_SHRINK_WRITEBACK);
                }
        }

        return sc->nr_scanned ? freed : SHRINK_STOP;
}
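
/*
 * Summary: the entry points in this file escalate through the
 * I915_SHRINK_* flags roughly as follows.
 *
 *   scan, first pass: BOUND | UNBOUND              (no forced wakeup)
 *   scan, kswapd:     + ACTIVE | WRITEBACK         (device woken)
 *   oom notifier:     BOUND | UNBOUND | WRITEBACK  (free everything idle)
 *   vmap notifier:    BOUND | UNBOUND | VMAPS      (vmapped objects only)
 */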

static int
i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
{
        struct drm_i915_private *i915 =
                container_of(nb, struct drm_i915_private, mm.oom_notifier);
        struct drm_i915_gem_object *obj;
        unsigned long unevictable, available, freed_pages;
        intel_wakeref_t wakeref;
        unsigned long flags;

        freed_pages = 0;
        with_intel_runtime_pm(&i915->runtime_pm, wakeref)
                freed_pages += i915_gem_shrink(i915, -1UL, NULL,
                                               I915_SHRINK_BOUND |
                                               I915_SHRINK_UNBOUND |
                                               I915_SHRINK_WRITEBACK);

        /* Because we may be allocating inside our own driver, we cannot
         * assert that there are no objects with pinned pages that are not
         * being pointed to by hardware.
         */
        available = unevictable = 0;
        spin_lock_irqsave(&i915->mm.obj_lock, flags);
        list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {
                if (!can_release_pages(obj))
                        unevictable += obj->base.size >> PAGE_SHIFT;
                else
                        available += obj->base.size >> PAGE_SHIFT;
        }
        spin_unlock_irqrestore(&i915->mm.obj_lock, flags);

        if (freed_pages || available)
                pr_info("Purging GPU memory, %lu pages freed, "
                        "%lu pages still pinned, %lu pages left available.\n",
                        freed_pages, unevictable, available);

        *(unsigned long *)ptr += freed_pages;
        return NOTIFY_DONE;
}

static int
i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
        struct drm_i915_private *i915 =
                container_of(nb, struct drm_i915_private, mm.vmap_notifier);
        struct i915_vma *vma, *next;
        unsigned long freed_pages = 0;
        intel_wakeref_t wakeref;

        with_intel_runtime_pm(&i915->runtime_pm, wakeref)
                freed_pages += i915_gem_shrink(i915, -1UL, NULL,
                                               I915_SHRINK_BOUND |
                                               I915_SHRINK_UNBOUND |
                                               I915_SHRINK_VMAPS);

        /* We also want to clear any cached iomaps as they wrap vmap */
        mutex_lock(&i915->ggtt.vm.mutex);
        list_for_each_entry_safe(vma, next,
                                 &i915->ggtt.vm.bound_list, vm_link) {
                unsigned long count = vma->node.size >> PAGE_SHIFT;

                if (!vma->iomap || i915_vma_is_active(vma))
                        continue;

                if (__i915_vma_unbind(vma) == 0)
                        freed_pages += count;
        }
        mutex_unlock(&i915->ggtt.vm.mutex);

        *(unsigned long *)ptr += freed_pages;
        return NOTIFY_DONE;
}

void i915_gem_driver_register__shrinker(struct drm_i915_private *i915)
{
        i915->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
        i915->mm.shrinker.count_objects = i915_gem_shrinker_count;
        i915->mm.shrinker.seeks = DEFAULT_SEEKS;
        i915->mm.shrinker.batch = 4096;
        WARN_ON(register_shrinker(&i915->mm.shrinker));

        i915->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
        WARN_ON(register_oom_notifier(&i915->mm.oom_notifier));

        i915->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap;
        WARN_ON(register_vmap_purge_notifier(&i915->mm.vmap_notifier));
}
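
/*
 * For reference, the same count/scan registration pattern in isolation: a
 * minimal sketch for a hypothetical cache (the demo_* names are invented;
 * the struct shrinker fields and the register/unregister calls match the
 * kernel API used above).
 */
static unsigned long demo_count(struct shrinker *shrinker,
                                struct shrink_control *sc)
{
        return 0; /* number of reclaimable objects; 0 = nothing to do */
}

static unsigned long demo_scan(struct shrinker *shrinker,
                               struct shrink_control *sc)
{
        /* free up to sc->nr_to_scan objects, incrementing sc->nr_scanned */
        return SHRINK_STOP; /* nothing freed in this sketch */
}

static struct shrinker demo_shrinker = {
        .count_objects = demo_count,
        .scan_objects = demo_scan,
        .seeks = DEFAULT_SEEKS,
};

/* register_shrinker(&demo_shrinker) at init,
 * unregister_shrinker(&demo_shrinker) at teardown. */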

void i915_gem_driver_unregister__shrinker(struct drm_i915_private *i915)
{
        WARN_ON(unregister_vmap_purge_notifier(&i915->mm.vmap_notifier));
        WARN_ON(unregister_oom_notifier(&i915->mm.oom_notifier));
        unregister_shrinker(&i915->mm.shrinker);
}

void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
                                    struct mutex *mutex)
{
        bool unlock = false;

        if (!IS_ENABLED(CONFIG_LOCKDEP))
                return;

        if (!lockdep_is_held_type(&i915->drm.struct_mutex, -1)) {
                mutex_acquire(&i915->drm.struct_mutex.dep_map,
                              I915_MM_NORMAL, 0, _RET_IP_);
                unlock = true;
        }

        fs_reclaim_acquire(GFP_KERNEL);

        mutex_acquire(&mutex->dep_map, 0, 0, _RET_IP_);
        mutex_release(&mutex->dep_map, _RET_IP_);

        fs_reclaim_release(GFP_KERNEL);

        if (unlock)
                mutex_release(&i915->drm.struct_mutex.dep_map, _RET_IP_);
}
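
/*
 * The function above records a fictitious acquisition so lockdep learns
 * that @mutex can be taken inside memory reclaim; any later GFP_KERNEL
 * allocation made while holding such a mutex then triggers a lockdep
 * report instead of a rare runtime deadlock. Stripped of the struct_mutex
 * bookkeeping, the core of the trick is just this sketch (not a drop-in
 * helper):
 */
static void prime_reclaim_vs_mutex(struct mutex *mutex)
{
        /* enter a pretend reclaim context... */
        fs_reclaim_acquire(GFP_KERNEL);
        /* ...and acquire/release @mutex purely at the lockdep level */
        mutex_acquire(&mutex->dep_map, 0, 0, _RET_IP_);
        mutex_release(&mutex->dep_map, _RET_IP_);
        fs_reclaim_release(GFP_KERNEL);
}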

#define obj_to_i915(obj__) to_i915((obj__)->base.dev)

void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *i915 = obj_to_i915(obj);
        unsigned long flags;

        /*
         * We can only be called while the pages are pinned or when
         * the pages are released. If pinned, we should only be called
         * from a single caller under controlled conditions; and on release
         * only one caller may release us. The two paths must never overlap.
         */
        if (atomic_add_unless(&obj->mm.shrink_pin, 1, 0))
                return;

        spin_lock_irqsave(&i915->mm.obj_lock, flags);
        if (!atomic_fetch_inc(&obj->mm.shrink_pin) &&
            !list_empty(&obj->mm.link)) {
                list_del_init(&obj->mm.link);
                i915->mm.shrink_count--;
                i915->mm.shrink_memory -= obj->base.size;
        }
        spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}

static void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj,
                                              struct list_head *head)
{
        struct drm_i915_private *i915 = obj_to_i915(obj);
        unsigned long flags;

        GEM_BUG_ON(!i915_gem_object_has_pages(obj));
        if (!i915_gem_object_is_shrinkable(obj))
                return;

        if (atomic_add_unless(&obj->mm.shrink_pin, -1, 1))
                return;

        spin_lock_irqsave(&i915->mm.obj_lock, flags);
        GEM_BUG_ON(!kref_read(&obj->base.refcount));
        if (atomic_dec_and_test(&obj->mm.shrink_pin)) {
                GEM_BUG_ON(!list_empty(&obj->mm.link));

                list_add_tail(&obj->mm.link, head);
                i915->mm.shrink_count++;
                i915->mm.shrink_memory += obj->base.size;
        }
        spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}

void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj)
{
        __i915_gem_object_make_shrinkable(obj,
                                          &obj_to_i915(obj)->mm.shrink_list);
}

void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj)
{
        __i915_gem_object_make_shrinkable(obj,
                                          &obj_to_i915(obj)->mm.purge_list);
}
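
/*
 * Sketch of the shrink_pin scheme implemented by the functions above: the
 * counter holds the number of reasons the object must stay off the shrinker
 * lists, with zero meaning "on a list". atomic_add_unless() provides a
 * lockless fast path for every transition that does not cross zero; the
 * transitions shown are illustrative, not an exhaustive call graph.
 *
 *   make_unshrinkable():  0 -> 1   take obj_lock, unlink from the list
 *                         n -> n+1 (n > 0) lockless fast path
 *   make_shrinkable()/
 *   make_purgeable():     n -> n-1 (n > 1) lockless fast path
 *                         1 -> 0   take obj_lock, link onto the shrink or
 *                                  purge list respectively
 */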

Generated by: LCOV version 1.14