LCOV - code coverage report
Current view: top level - net/core - page_pool.c (source / functions)
Test: combined.info
Date: 2022-04-01 14:17:54

                 Hit    Total    Coverage
Lines:             0      200       0.0 %
Functions:         0       18       0.0 %
Branches:          0       98       0.0 %

           Branch data     Line data    Source code
       1                 :            : /* SPDX-License-Identifier: GPL-2.0
       2                 :            :  *
       3                 :            :  * page_pool.c
       4                 :            :  *      Author: Jesper Dangaard Brouer <netoptimizer@brouer.com>
       5                 :            :  *      Copyright (C) 2016 Red Hat, Inc.
       6                 :            :  */
       7                 :            : 
       8                 :            : #include <linux/types.h>
       9                 :            : #include <linux/kernel.h>
      10                 :            : #include <linux/slab.h>
      11                 :            : #include <linux/device.h>
      12                 :            : 
      13                 :            : #include <net/page_pool.h>
      14                 :            : #include <linux/dma-direction.h>
      15                 :            : #include <linux/dma-mapping.h>
      16                 :            : #include <linux/page-flags.h>
      17                 :            : #include <linux/mm.h> /* for __put_page() */
      18                 :            : 
      19                 :            : #include <trace/events/page_pool.h>
      20                 :            : 
      21                 :            : #define DEFER_TIME (msecs_to_jiffies(1000))
      22                 :            : #define DEFER_WARN_INTERVAL (60 * HZ)
      23                 :            : 
      24                 :          0 : static int page_pool_init(struct page_pool *pool,
      25                 :            :                           const struct page_pool_params *params)
      26                 :            : {
      27                 :          0 :         unsigned int ring_qsize = 1024; /* Default */
      28                 :            : 
      29                 :          0 :         memcpy(&pool->p, params, sizeof(pool->p));
      30                 :            : 
      31                 :            :         /* Validate only known flags were used */
      32         [ #  # ]:          0 :         if (pool->p.flags & ~(PP_FLAG_ALL))
      33                 :            :                 return -EINVAL;
      34                 :            : 
      35         [ #  # ]:          0 :         if (pool->p.pool_size)
      36                 :          0 :                 ring_qsize = pool->p.pool_size;
      37                 :            : 
       38                 :            :         /* Sanity limit on the memory that can be pinned down */
      39         [ #  # ]:          0 :         if (ring_qsize > 32768)
      40                 :            :                 return -E2BIG;
      41                 :            : 
      42                 :            :         /* DMA direction is either DMA_FROM_DEVICE or DMA_BIDIRECTIONAL.
       43                 :            :          * DMA_BIDIRECTIONAL also allows the page to be used for DMA transmit,
      44                 :            :          * which is the XDP_TX use-case.
      45                 :            :          */
      46         [ #  # ]:          0 :         if ((pool->p.dma_dir != DMA_FROM_DEVICE) &&
      47                 :            :             (pool->p.dma_dir != DMA_BIDIRECTIONAL))
      48                 :            :                 return -EINVAL;
      49                 :            : 
      50         [ #  # ]:          0 :         if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) {
      51                 :            :                 /* In order to request DMA-sync-for-device the page
      52                 :            :                  * needs to be mapped
      53                 :            :                  */
      54         [ #  # ]:          0 :                 if (!(pool->p.flags & PP_FLAG_DMA_MAP))
      55                 :            :                         return -EINVAL;
      56                 :            : 
      57         [ #  # ]:          0 :                 if (!pool->p.max_len)
      58                 :            :                         return -EINVAL;
      59                 :            : 
      60                 :            :                 /* pool->p.offset has to be set according to the address
      61                 :            :                  * offset used by the DMA engine to start copying rx data
      62                 :            :                  */
      63                 :            :         }
      64                 :            : 
      65         [ #  # ]:          0 :         if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
      66                 :            :                 return -ENOMEM;
      67                 :            : 
      68                 :          0 :         atomic_set(&pool->pages_state_release_cnt, 0);
      69                 :            : 
       70                 :            :         /* The driver calling page_pool_create() must also call page_pool_destroy() */
      71                 :          0 :         refcount_set(&pool->user_cnt, 1);
      72                 :            : 
      73         [ #  # ]:          0 :         if (pool->p.flags & PP_FLAG_DMA_MAP)
      74                 :          0 :                 get_device(pool->p.dev);
      75                 :            : 
      76                 :            :         return 0;
      77                 :            : }
      78                 :            : 
      79                 :          0 : struct page_pool *page_pool_create(const struct page_pool_params *params)
      80                 :            : {
      81                 :          0 :         struct page_pool *pool;
      82                 :          0 :         int err;
      83                 :            : 
      84                 :          0 :         pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, params->nid);
      85         [ #  # ]:          0 :         if (!pool)
      86                 :            :                 return ERR_PTR(-ENOMEM);
      87                 :            : 
      88                 :          0 :         err = page_pool_init(pool, params);
      89         [ #  # ]:          0 :         if (err < 0) {
      90                 :          0 :                 pr_warn("%s() gave up with errno %d\n", __func__, err);
      91                 :          0 :                 kfree(pool);
      92                 :          0 :                 return ERR_PTR(err);
      93                 :            :         }
      94                 :            : 
      95                 :            :         return pool;
      96                 :            : }
      97                 :            : EXPORT_SYMBOL(page_pool_create);
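/* Editor's example (not part of page_pool.c): a driver would typically fill
 * in struct page_pool_params from its RX-queue setup path and call
 * page_pool_create().  The helper name and the concrete values below
 * (queue size, offset, max_len) are illustrative assumptions only.
 */
static struct page_pool *example_rx_create_pool(struct device *dev, int nid)
{
	struct page_pool_params pp_params = {
		.flags     = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order     = 0,			/* order-0 pages */
		.pool_size = 256,		/* roughly the RX-ring size */
		.nid       = nid,		/* or NUMA_NO_NODE to auto-select */
		.dev       = dev,		/* device used for DMA mapping */
		.dma_dir   = DMA_FROM_DEVICE,	/* DMA_BIDIRECTIONAL for XDP_TX */
		.max_len   = PAGE_SIZE,		/* max area synced for the device */
		.offset    = 0,			/* where the DMA engine starts writing */
	};
	struct page_pool *pool;

	pool = page_pool_create(&pp_params);
	if (IS_ERR(pool))
		return NULL;	/* page_pool_init() rejected the params */
	return pool;
}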
      98                 :            : 
      99                 :            : static void __page_pool_return_page(struct page_pool *pool, struct page *page);
     100                 :            : 
     101                 :            : noinline
     102                 :          0 : static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
     103                 :            : {
     104                 :          0 :         struct ptr_ring *r = &pool->ring;
     105                 :          0 :         struct page *page;
     106                 :          0 :         int pref_nid; /* preferred NUMA node */
     107                 :            : 
     108                 :            :         /* Quicker fallback, avoid locks when ring is empty */
     109   [ #  #  #  # ]:          0 :         if (__ptr_ring_empty(r))
     110                 :            :                 return NULL;
     111                 :            : 
      112                 :            :         /* Softirq context guarantees the CPU, and thus the NUMA node, is stable.
      113                 :            :          * This assumes the CPU refilling the driver RX-ring also runs the RX-NAPI.
     114                 :            :          */
     115                 :            : #ifdef CONFIG_NUMA
     116         [ #  # ]:          0 :         pref_nid = (pool->p.nid == NUMA_NO_NODE) ? numa_mem_id() : pool->p.nid;
     117                 :            : #else
     118                 :            :         /* Ignore pool->p.nid setting if !CONFIG_NUMA, helps compiler */
     119                 :            :         pref_nid = numa_mem_id(); /* will be zero like page_to_nid() */
     120                 :            : #endif
     121                 :            : 
     122                 :            :         /* Slower-path: Get pages from locked ring queue */
     123                 :          0 :         spin_lock(&r->consumer_lock);
     124                 :            : 
      125                 :            :         /* Refill alloc array, but only if the NUMA node matches */
     126                 :          0 :         do {
     127                 :          0 :                 page = __ptr_ring_consume(r);
     128         [ #  # ]:          0 :                 if (unlikely(!page))
     129                 :            :                         break;
     130                 :            : 
     131         [ #  # ]:          0 :                 if (likely(page_to_nid(page) == pref_nid)) {
     132                 :          0 :                         pool->alloc.cache[pool->alloc.count++] = page;
     133                 :            :                 } else {
     134                 :            :                         /* NUMA mismatch;
     135                 :            :                          * (1) release 1 page to page-allocator and
      136                 :            :                          * (2) break out and fall through to alloc_pages_node().
      137                 :            :                          * This limits stress on the page buddy allocator.
     138                 :            :                          */
     139                 :          0 :                         __page_pool_return_page(pool, page);
     140                 :          0 :                         page = NULL;
     141                 :          0 :                         break;
     142                 :            :                 }
     143         [ #  # ]:          0 :         } while (pool->alloc.count < PP_ALLOC_CACHE_REFILL);
     144                 :            : 
     145                 :            :         /* Return last page */
     146         [ #  # ]:          0 :         if (likely(pool->alloc.count > 0))
     147                 :          0 :                 page = pool->alloc.cache[--pool->alloc.count];
     148                 :            : 
     149                 :          0 :         spin_unlock(&r->consumer_lock);
     150                 :          0 :         return page;
     151                 :            : }
     152                 :            : 
     153                 :            : /* fast path */
     154                 :          0 : static struct page *__page_pool_get_cached(struct page_pool *pool)
     155                 :            : {
     156                 :          0 :         struct page *page;
     157                 :            : 
     158                 :            :         /* Caller MUST guarantee safe non-concurrent access, e.g. softirq */
     159                 :          0 :         if (likely(pool->alloc.count)) {
     160                 :            :                 /* Fast-path */
     161                 :          0 :                 page = pool->alloc.cache[--pool->alloc.count];
     162                 :            :         } else {
     163                 :          0 :                 page = page_pool_refill_alloc_cache(pool);
     164                 :            :         }
     165                 :            : 
     166                 :          0 :         return page;
     167                 :            : }
     168                 :            : 
     169                 :          0 : static void page_pool_dma_sync_for_device(struct page_pool *pool,
     170                 :            :                                           struct page *page,
     171                 :            :                                           unsigned int dma_sync_size)
     172                 :            : {
     173                 :          0 :         dma_sync_size = min(dma_sync_size, pool->p.max_len);
     174                 :          0 :         dma_sync_single_range_for_device(pool->p.dev, page->dma_addr,
     175                 :          0 :                                          pool->p.offset, dma_sync_size,
     176                 :            :                                          pool->p.dma_dir);
     177                 :          0 : }
     178                 :            : 
     179                 :            : /* slow path */
     180                 :            : noinline
     181                 :          0 : static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
     182                 :            :                                                  gfp_t _gfp)
     183                 :            : {
     184                 :          0 :         struct page *page;
     185                 :          0 :         gfp_t gfp = _gfp;
     186                 :          0 :         dma_addr_t dma;
     187                 :            : 
     188                 :            :         /* We could always set __GFP_COMP, and avoid this branch, as
     189                 :            :          * prep_new_page() can handle order-0 with __GFP_COMP.
     190                 :            :          */
     191         [ #  # ]:          0 :         if (pool->p.order)
     192                 :          0 :                 gfp |= __GFP_COMP;
     193                 :            : 
     194                 :            :         /* FUTURE development:
     195                 :            :          *
     196                 :            :          * Current slow-path essentially falls back to single page
     197                 :            :          * allocations, which doesn't improve performance.  This code
      198                 :            :          * needs bulk allocation support from the page allocator code.
     199                 :            :          */
     200                 :            : 
     201                 :            :         /* Cache was empty, do real allocation */
     202                 :            : #ifdef CONFIG_NUMA
     203         [ #  # ]:          0 :         page = alloc_pages_node(pool->p.nid, gfp, pool->p.order);
     204                 :            : #else
     205                 :            :         page = alloc_pages(gfp, pool->p.order);
     206                 :            : #endif
     207         [ #  # ]:          0 :         if (!page)
     208                 :            :                 return NULL;
     209                 :            : 
     210         [ #  # ]:          0 :         if (!(pool->p.flags & PP_FLAG_DMA_MAP))
     211                 :          0 :                 goto skip_dma_map;
     212                 :            : 
     213                 :            :         /* Setup DMA mapping: use 'struct page' area for storing DMA-addr
     214                 :            :          * since dma_addr_t can be either 32 or 64 bits and does not always fit
      215                 :            :          * into page private data (i.e. a 32-bit CPU with 64-bit DMA capabilities).
      216                 :            :          * This mapping is kept for the lifetime of the page, until it leaves the pool.
     217                 :            :          */
     218                 :          0 :         dma = dma_map_page_attrs(pool->p.dev, page, 0,
     219                 :          0 :                                  (PAGE_SIZE << pool->p.order),
     220                 :            :                                  pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
     221                 :          0 :         if (dma_mapping_error(pool->p.dev, dma)) {
     222                 :          0 :                 put_page(page);
     223                 :          0 :                 return NULL;
     224                 :            :         }
     225                 :          0 :         page->dma_addr = dma;
     226                 :            : 
     227         [ #  # ]:          0 :         if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
     228                 :          0 :                 page_pool_dma_sync_for_device(pool, page, pool->p.max_len);
     229                 :            : 
     230                 :          0 : skip_dma_map:
     231                 :            :         /* Track how many pages are held 'in-flight' */
     232                 :          0 :         pool->pages_state_hold_cnt++;
     233                 :            : 
     234                 :          0 :         trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt);
     235                 :            : 
      236                 :            :         /* A page that was just allocated should/must have refcnt 1. */
     237                 :          0 :         return page;
     238                 :            : }
     239                 :            : 
      240                 :            : /* Use page_pool to replace alloc_pages() API calls, but provide a
      241                 :            :  * synchronization guarantee for the allocation side.
     242                 :            :  */
     243                 :          0 : struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
     244                 :            : {
     245                 :          0 :         struct page *page;
     246                 :            : 
     247                 :            :         /* Fast-path: Get a page from cache */
     248         [ #  # ]:          0 :         page = __page_pool_get_cached(pool);
     249         [ #  # ]:          0 :         if (page)
     250                 :            :                 return page;
     251                 :            : 
     252                 :            :         /* Slow-path: cache empty, do real allocation */
     253                 :          0 :         page = __page_pool_alloc_pages_slow(pool, gfp);
     254                 :          0 :         return page;
     255                 :            : }
     256                 :            : EXPORT_SYMBOL(page_pool_alloc_pages);
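/* Editor's sketch (not part of page_pool.c): allocation normally happens
 * from the driver's RX-ring refill code running in NAPI/softirq context,
 * hence GFP_ATOMIC.  The helper name is an assumption for illustration.
 */
static struct page *example_rx_refill_one(struct page_pool *pool)
{
	struct page *page;

	page = page_pool_alloc_pages(pool, GFP_ATOMIC);
	if (unlikely(!page))
		return NULL;	/* leave the ring entry empty, retry later */

	/* With PP_FLAG_DMA_MAP set, __page_pool_alloc_pages_slow() above has
	 * already stored the mapping in page->dma_addr for the driver to use.
	 */
	return page;
}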
     257                 :            : 
     258                 :            : /* Calculate distance between two u32 values, valid if distance is below 2^(31)
     259                 :            :  *  https://en.wikipedia.org/wiki/Serial_number_arithmetic#General_Solution
     260                 :            :  */
     261                 :            : #define _distance(a, b) (s32)((a) - (b))
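/* Editor's note: a worked example of the wrap-safe distance, as used by
 * page_pool_inflight() below.  With hold_cnt = 0x00000002 and
 * release_cnt = 0xfffffffe (the hold counter has wrapped past 2^32), the
 * u32 subtraction yields 0x00000004, and the cast to s32 keeps it at +4
 * pages in flight.  The result is only meaningful while the real distance
 * stays below 2^31.
 */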
     262                 :            : 
     263                 :          0 : static s32 page_pool_inflight(struct page_pool *pool)
     264                 :            : {
     265                 :          0 :         u32 release_cnt = atomic_read(&pool->pages_state_release_cnt);
     266                 :          0 :         u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt);
     267                 :          0 :         s32 inflight;
     268                 :            : 
     269                 :          0 :         inflight = _distance(hold_cnt, release_cnt);
     270                 :            : 
     271                 :          0 :         trace_page_pool_release(pool, inflight, hold_cnt, release_cnt);
     272         [ #  # ]:          0 :         WARN(inflight < 0, "Negative(%d) inflight packet-pages", inflight);
     273                 :            : 
     274                 :          0 :         return inflight;
     275                 :            : }
     276                 :            : 
      277                 :            : /* Clean up page_pool state from the page */
     278                 :          0 : static void __page_pool_clean_page(struct page_pool *pool,
     279                 :            :                                    struct page *page)
     280                 :            : {
     281                 :          0 :         dma_addr_t dma;
     282                 :          0 :         int count;
     283                 :            : 
     284         [ #  # ]:          0 :         if (!(pool->p.flags & PP_FLAG_DMA_MAP))
     285                 :          0 :                 goto skip_dma_unmap;
     286                 :            : 
     287                 :          0 :         dma = page->dma_addr;
     288                 :            :         /* DMA unmap */
     289                 :          0 :         dma_unmap_page_attrs(pool->p.dev, dma,
     290                 :          0 :                              PAGE_SIZE << pool->p.order, pool->p.dma_dir,
     291                 :            :                              DMA_ATTR_SKIP_CPU_SYNC);
     292                 :          0 :         page->dma_addr = 0;
     293                 :          0 : skip_dma_unmap:
     294                 :            :         /* This may be the last page returned, releasing the pool, so
     295                 :            :          * it is not safe to reference pool afterwards.
     296                 :            :          */
     297                 :          0 :         count = atomic_inc_return(&pool->pages_state_release_cnt);
     298                 :          0 :         trace_page_pool_state_release(pool, page, count);
     299                 :          0 : }
     300                 :            : 
     301                 :            : /* unmap the page and clean our state */
     302                 :          0 : void page_pool_unmap_page(struct page_pool *pool, struct page *page)
     303                 :            : {
      304                 :            :         /* When a page is unmapped, this implies the page will not be
      305                 :            :          * returned to the page_pool.
     306                 :            :          */
     307                 :          0 :         __page_pool_clean_page(pool, page);
     308                 :          0 : }
     309                 :            : EXPORT_SYMBOL(page_pool_unmap_page);
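/* Editor's sketch (assumed driver pattern, not part of page_pool.c): a
 * driver that hands a received page up the stack as an skb, instead of
 * recycling it, unmaps it first so the pool's DMA mapping is not leaked;
 * the normal put_page() on the skb free path then releases the page.
 */
static void example_pass_page_up_stack(struct page_pool *pool,
				       struct page *page)
{
	page_pool_unmap_page(pool, page);
	/* ...build the skb around the page and hand it to the stack;
	 * from here on the page is an ordinary page-allocator page.
	 */
}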
     310                 :            : 
     311                 :            : /* Return a page to the page allocator, cleaning up our state */
     312                 :          0 : static void __page_pool_return_page(struct page_pool *pool, struct page *page)
     313                 :            : {
     314                 :          0 :         __page_pool_clean_page(pool, page);
     315                 :            : 
     316                 :          0 :         put_page(page);
     317                 :            :         /* An optimization would be to call __free_pages(page, pool->p.order)
     318                 :            :          * knowing page is not part of page-cache (thus avoiding a
     319                 :            :          * __page_cache_release() call).
     320                 :            :          */
     321                 :          0 : }
     322                 :            : 
     323                 :          0 : static bool __page_pool_recycle_into_ring(struct page_pool *pool,
     324                 :            :                                    struct page *page)
     325                 :            : {
     326                 :          0 :         int ret;
     327                 :            :         /* BH protection not needed if current is serving softirq */
     328         [ #  # ]:          0 :         if (in_serving_softirq())
     329                 :          0 :                 ret = ptr_ring_produce(&pool->ring, page);
     330                 :            :         else
     331                 :          0 :                 ret = ptr_ring_produce_bh(&pool->ring, page);
     332                 :            : 
     333                 :          0 :         return (ret == 0) ? true : false;
     334                 :            : }
     335                 :            : 
     336                 :            : /* Only allow direct recycling in special circumstances, into the
     337                 :            :  * alloc side cache.  E.g. during RX-NAPI processing for XDP_DROP use-case.
     338                 :            :  *
     339                 :            :  * Caller must provide appropriate safe context.
     340                 :            :  */
     341                 :          0 : static bool __page_pool_recycle_direct(struct page *page,
     342                 :            :                                        struct page_pool *pool)
     343                 :            : {
     344                 :          0 :         if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE))
     345                 :            :                 return false;
     346                 :            : 
     347                 :            :         /* Caller MUST have verified/know (page_ref_count(page) == 1) */
     348                 :          0 :         pool->alloc.cache[pool->alloc.count++] = page;
     349                 :          0 :         return true;
     350                 :            : }
     351                 :            : 
     352                 :            : /* page is NOT reusable when:
      353                 :            :  * 1) it was allocated when the system was under memory pressure (page_is_pfmemalloc)
     354                 :            :  */
     355                 :          0 : static bool pool_page_reusable(struct page_pool *pool, struct page *page)
     356                 :            : {
     357         [ #  # ]:          0 :         return !page_is_pfmemalloc(page);
     358                 :            : }
     359                 :            : 
     360                 :          0 : void __page_pool_put_page(struct page_pool *pool, struct page *page,
     361                 :            :                           unsigned int dma_sync_size, bool allow_direct)
     362                 :            : {
     363                 :            :         /* This allocator is optimized for the XDP mode that uses
      364                 :            :          * one-frame-per-page, but has fallbacks that act like the
     365                 :            :          * regular page allocator APIs.
     366                 :            :          *
      367                 :            :          * refcnt == 1 means the page_pool owns the page, and can recycle it.
     368                 :            :          */
     369   [ #  #  #  # ]:          0 :         if (likely(page_ref_count(page) == 1 &&
     370                 :            :                    pool_page_reusable(pool, page))) {
     371                 :            :                 /* Read barrier done in page_ref_count / READ_ONCE */
     372                 :            : 
     373         [ #  # ]:          0 :                 if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
     374                 :          0 :                         page_pool_dma_sync_for_device(pool, page,
     375                 :            :                                                       dma_sync_size);
     376                 :            : 
     377   [ #  #  #  # ]:          0 :                 if (allow_direct && in_serving_softirq())
     378         [ #  # ]:          0 :                         if (__page_pool_recycle_direct(page, pool))
     379                 :          0 :                                 return;
     380                 :            : 
     381         [ #  # ]:          0 :                 if (!__page_pool_recycle_into_ring(pool, page)) {
     382                 :            :                         /* Cache full, fallback to free pages */
     383                 :          0 :                         __page_pool_return_page(pool, page);
     384                 :            :                 }
     385                 :          0 :                 return;
     386                 :            :         }
      387                 :            :         /* Fallback/non-XDP mode: the API user has an elevated refcnt.
     388                 :            :          *
     389                 :            :          * Many drivers split up the page into fragments, and some
     390                 :            :          * want to keep doing this to save memory and do refcnt based
     391                 :            :          * recycling. Support this use case too, to ease drivers
     392                 :            :          * switching between XDP/non-XDP.
     393                 :            :          *
      394                 :            :          * In case page_pool maintains the DMA mapping, the API user must
      395                 :            :          * call page_pool_put_page() once.  In this elevated refcnt
      396                 :            :          * case, the DMA mapping is unmapped/released, as the driver is likely
     397                 :            :          * doing refcnt based recycle tricks, meaning another process
     398                 :            :          * will be invoking put_page.
     399                 :            :          */
     400                 :          0 :         __page_pool_clean_page(pool, page);
     401                 :          0 :         put_page(page);
     402                 :            : }
     403                 :            : EXPORT_SYMBOL(__page_pool_put_page);
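/* Editor's sketch (not part of page_pool.c): typical XDP_DROP fast path in
 * a driver's NAPI poll, recycling straight into the alloc-side cache.
 * Drivers usually go through the inline wrappers in <net/page_pool.h>
 * (e.g. page_pool_recycle_direct()), assumed here to reduce to this call.
 */
static void example_xdp_drop_recycle(struct page_pool *pool, struct page *page)
{
	/* dma_sync_size == -1 requests the full pool->p.max_len sync (it is
	 * clamped by page_pool_dma_sync_for_device() above); allow_direct is
	 * only legal because NAPI poll runs in softirq context.
	 */
	__page_pool_put_page(pool, page, -1, true);
}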
     404                 :            : 
     405                 :          0 : static void __page_pool_empty_ring(struct page_pool *pool)
     406                 :            : {
     407                 :          0 :         struct page *page;
     408                 :            : 
     409                 :            :         /* Empty recycle ring */
     410         [ #  # ]:          0 :         while ((page = ptr_ring_consume_bh(&pool->ring))) {
     411                 :            :                 /* Verify the refcnt invariant of cached pages */
     412         [ #  # ]:          0 :                 if (!(page_ref_count(page) == 1))
     413                 :          0 :                         pr_crit("%s() page_pool refcnt %d violation\n",
     414                 :            :                                 __func__, page_ref_count(page));
     415                 :            : 
     416                 :          0 :                 __page_pool_return_page(pool, page);
     417                 :            :         }
     418                 :          0 : }
     419                 :            : 
     420                 :          0 : static void page_pool_free(struct page_pool *pool)
     421                 :            : {
     422         [ #  # ]:          0 :         if (pool->disconnect)
     423                 :          0 :                 pool->disconnect(pool);
     424                 :            : 
     425                 :          0 :         ptr_ring_cleanup(&pool->ring, NULL);
     426                 :            : 
     427         [ #  # ]:          0 :         if (pool->p.flags & PP_FLAG_DMA_MAP)
     428                 :          0 :                 put_device(pool->p.dev);
     429                 :            : 
     430                 :          0 :         kfree(pool);
     431                 :          0 : }
     432                 :            : 
     433                 :          0 : static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
     434                 :            : {
     435                 :          0 :         struct page *page;
     436                 :            : 
     437         [ #  # ]:          0 :         if (pool->destroy_cnt)
     438                 :            :                 return;
     439                 :            : 
      440                 :            :         /* Empty the alloc cache; assume the caller made sure it is
      441                 :            :          * no longer in use, and page_pool_alloc_pages() cannot be
      442                 :            :          * called concurrently.
     443                 :            :          */
     444         [ #  # ]:          0 :         while (pool->alloc.count) {
     445                 :          0 :                 page = pool->alloc.cache[--pool->alloc.count];
     446                 :          0 :                 __page_pool_return_page(pool, page);
     447                 :            :         }
     448                 :            : }
     449                 :            : 
     450                 :          0 : static void page_pool_scrub(struct page_pool *pool)
     451                 :            : {
     452                 :          0 :         page_pool_empty_alloc_cache_once(pool);
     453                 :          0 :         pool->destroy_cnt++;
     454                 :            : 
     455                 :            :         /* No more consumers should exist, but producers could still
     456                 :            :          * be in-flight.
     457                 :            :          */
     458                 :          0 :         __page_pool_empty_ring(pool);
     459                 :            : }
     460                 :            : 
     461                 :          0 : static int page_pool_release(struct page_pool *pool)
     462                 :            : {
     463                 :          0 :         int inflight;
     464                 :            : 
     465                 :          0 :         page_pool_scrub(pool);
     466                 :          0 :         inflight = page_pool_inflight(pool);
     467         [ #  # ]:          0 :         if (!inflight)
     468                 :          0 :                 page_pool_free(pool);
     469                 :            : 
     470                 :          0 :         return inflight;
     471                 :            : }
     472                 :            : 
     473                 :          0 : static void page_pool_release_retry(struct work_struct *wq)
     474                 :            : {
     475                 :          0 :         struct delayed_work *dwq = to_delayed_work(wq);
     476                 :          0 :         struct page_pool *pool = container_of(dwq, typeof(*pool), release_dw);
     477                 :          0 :         int inflight;
     478                 :            : 
     479                 :          0 :         inflight = page_pool_release(pool);
     480         [ #  # ]:          0 :         if (!inflight)
     481                 :            :                 return;
     482                 :            : 
     483                 :            :         /* Periodic warning */
     484         [ #  # ]:          0 :         if (time_after_eq(jiffies, pool->defer_warn)) {
     485                 :          0 :                 int sec = (s32)((u32)jiffies - (u32)pool->defer_start) / HZ;
     486                 :            : 
     487                 :          0 :                 pr_warn("%s() stalled pool shutdown %d inflight %d sec\n",
     488                 :            :                         __func__, inflight, sec);
     489                 :          0 :                 pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;
     490                 :            :         }
     491                 :            : 
     492                 :            :         /* Still not ready to be disconnected, retry later */
     493                 :          0 :         schedule_delayed_work(&pool->release_dw, DEFER_TIME);
     494                 :            : }
     495                 :            : 
     496                 :          0 : void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *))
     497                 :            : {
     498                 :          0 :         refcount_inc(&pool->user_cnt);
     499                 :          0 :         pool->disconnect = disconnect;
     500                 :          0 : }
     501                 :            : 
     502                 :          0 : void page_pool_destroy(struct page_pool *pool)
     503                 :            : {
     504         [ #  # ]:          0 :         if (!pool)
     505                 :            :                 return;
     506                 :            : 
     507         [ #  # ]:          0 :         if (!page_pool_put(pool))
     508                 :            :                 return;
     509                 :            : 
     510         [ #  # ]:          0 :         if (!page_pool_release(pool))
     511                 :            :                 return;
     512                 :            : 
     513                 :          0 :         pool->defer_start = jiffies;
     514                 :          0 :         pool->defer_warn  = jiffies + DEFER_WARN_INTERVAL;
     515                 :            : 
     516                 :          0 :         INIT_DELAYED_WORK(&pool->release_dw, page_pool_release_retry);
     517                 :          0 :         schedule_delayed_work(&pool->release_dw, DEFER_TIME);
     518                 :            : }
     519                 :            : EXPORT_SYMBOL(page_pool_destroy);
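/* Editor's sketch (assumed teardown order, not part of page_pool.c): when
 * the pool also backs an XDP memory model, the xdp core holds an extra
 * user reference taken via page_pool_use_xdp_mem() above, so both the
 * memory-model unregister and page_pool_destroy() are needed.  The
 * deferred work above keeps retrying while pages are still in flight.
 * Assumes <net/xdp.h> is available for struct xdp_rxq_info.
 */
static void example_rx_queue_teardown(struct xdp_rxq_info *xdp_rxq,
				      struct page_pool *pool)
{
	xdp_rxq_info_unreg(xdp_rxq);	/* drops the xdp-core reference */
	page_pool_destroy(pool);	/* drops the driver's own reference */
}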
     520                 :            : 
     521                 :            : /* Caller must provide appropriate safe context, e.g. NAPI. */
     522                 :          0 : void page_pool_update_nid(struct page_pool *pool, int new_nid)
     523                 :            : {
     524                 :          0 :         struct page *page;
     525                 :            : 
     526                 :          0 :         trace_page_pool_update_nid(pool, new_nid);
     527                 :          0 :         pool->p.nid = new_nid;
     528                 :            : 
     529                 :            :         /* Flush pool alloc cache, as refill will check NUMA node */
     530         [ #  # ]:          0 :         while (pool->alloc.count) {
     531                 :          0 :                 page = pool->alloc.cache[--pool->alloc.count];
     532                 :          0 :                 __page_pool_return_page(pool, page);
     533                 :            :         }
     534                 :          0 : }
     535                 :            : EXPORT_SYMBOL(page_pool_update_nid);
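/* Editor's sketch (assumed usage, not part of page_pool.c): a driver that
 * notices its NAPI poll now runs on a different NUMA node can steer future
 * refills there.  Running this from the poll loop satisfies the "safe
 * context" requirement stated above; the helper name is illustrative.
 */
static void example_napi_check_numa_node(struct page_pool *pool)
{
	int nid = numa_mem_id();	/* memory node of the CPU running NAPI */

	if (pool->p.nid != nid)
		page_pool_update_nid(pool, nid);
}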

Generated by: LCOV version 1.14