// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009		SUSE Linux Products GmbH
 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
 *
 * Copyright (C) 2017		Facebook Inc.
 * Copyright (C) 2017		Dennis Zhou <dennisszhou@gmail.com>
 *
 * The percpu allocator handles both static and dynamic areas.  Percpu
 * areas are allocated in chunks which are divided into units.  There is
 * a 1-to-1 mapping for units to possible cpus.  These units are grouped
 * based on NUMA properties of the machine.
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done by offsets into a unit's address space.  I.e., an
 * area of 512 bytes at 6k in c1 occupies 512 bytes at 6k in c1:u0,
 * c1:u1, c1:u2, etc.  On NUMA machines, the mapping may be non-linear
 * and even sparse.  Access is handled by configuring percpu base
 * registers according to the cpu to unit mappings and offsetting the
 * base address using pcpu_unit_size.
 *
 * There is special consideration for the first chunk which must handle
 * the static percpu variables in the kernel image as allocation services
 * are not online yet.  In short, the first chunk is structured like so:
 *
 *                  <Static | [Reserved] | Dynamic>
 *
 * The static data is copied from the original section managed by the
 * linker.  The reserved section, if non-zero, primarily manages static
 * percpu variables from kernel modules.  Finally, the dynamic section
 * takes care of normal allocations.
 *
 * The allocator organizes chunks into lists according to free size and
 * tries to allocate from the fullest chunk first.  Each chunk is managed
 * by a bitmap with metadata blocks.  The allocation map is updated on
 * every allocation and free to reflect the current state while the boundary
 * map is only updated on allocation.  Each metadata block contains
 * information to help mitigate the need to iterate over large portions
 * of the bitmap.  The reverse mapping from page to chunk is stored in
 * the page's index.  Lastly, units are lazily backed and grow in unison.
 *
 * There is a unique conversion that goes on here between bytes and bits.
 * Each bit represents a fragment of size PCPU_MIN_ALLOC_SIZE.  The chunk
 * tracks the number of pages it is responsible for in nr_pages.  Helper
 * functions are used to convert between bytes, bits, and blocks.
 * All hints are managed in bits unless explicitly stated.
 *
 * To use this allocator, arch code should do the following:
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   regular address to percpu pointer and back if they need to be
 *   different from the default
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   setup the first chunk containing the kernel static percpu area
 */

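/*
 * Illustrative example (not part of the allocator): how client code
 * typically consumes the dynamic area described above, via the public
 * percpu API.  A minimal sketch; "hits" is a hypothetical counter.
 *
 *	unsigned long __percpu *hits;
 *	unsigned long total = 0;
 *	int cpu;
 *
 *	hits = alloc_percpu(unsigned long);
 *	if (!hits)
 *		return -ENOMEM;
 *
 *	this_cpu_inc(*hits);
 *
 *	for_each_possible_cpu(cpu)
 *		total += *per_cpu_ptr(hits, cpu);
 *
 *	free_percpu(hits);
 */
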
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/lcm.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/kmemleak.h>
#include <linux/sched.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#define CREATE_TRACE_POINTS
#include <trace/events/percpu.h>

#include "percpu-internal.h"

/* the slots are sorted by free bytes left, 1-31 bytes share the same slot */
#define PCPU_SLOT_BASE_SHIFT		5
/* chunks in slots below this are subject to being sidelined on failed alloc */
#define PCPU_SLOT_FAIL_THRESHOLD	3

#define PCPU_EMPTY_POP_PAGES_LOW	2
#define PCPU_EMPTY_POP_PAGES_HIGH	4

#ifdef CONFIG_SMP
/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)					\
	(void __percpu *)((unsigned long)(addr) -			\
			  (unsigned long)pcpu_base_addr +		\
			  (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr)						\
	(void __force *)((unsigned long)(ptr) +				\
			 (unsigned long)pcpu_base_addr -		\
			 (unsigned long)__per_cpu_start)
#endif
#else	/* CONFIG_SMP */
/* on UP, it's always identity mapped */
#define __addr_to_pcpu_ptr(addr)	(void __percpu *)(addr)
#define __pcpu_ptr_to_addr(ptr)		(void __force *)(ptr)
#endif	/* CONFIG_SMP */

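/*
 * Illustrative example: with the default mapping above, conversion is a
 * constant offset, so a round-trip is the identity.  Assuming the default
 * macros and some offset "off" into the first chunk:
 *
 *	void *addr = pcpu_base_addr + off;
 *	void __percpu *ptr = __addr_to_pcpu_ptr(addr);
 *
 * ptr now equals __per_cpu_start + off, and __pcpu_ptr_to_addr(ptr)
 * yields addr again.
 */
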
static int pcpu_unit_pages __ro_after_init;
static int pcpu_unit_size __ro_after_init;
static int pcpu_nr_units __ro_after_init;
static int pcpu_atom_size __ro_after_init;
int pcpu_nr_slots __ro_after_init;
static size_t pcpu_chunk_struct_size __ro_after_init;

/* cpus with the lowest and highest unit addresses */
static unsigned int pcpu_low_unit_cpu __ro_after_init;
static unsigned int pcpu_high_unit_cpu __ro_after_init;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __ro_after_init;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

static const int *pcpu_unit_map __ro_after_init;	/* cpu -> unit */
const unsigned long *pcpu_unit_offsets __ro_after_init;	/* cpu -> unit offset */

/* group information, used for vm allocation */
static int pcpu_nr_groups __ro_after_init;
static const unsigned long *pcpu_group_offsets __ro_after_init;
static const size_t *pcpu_group_sizes __ro_after_init;

/*
 * The first chunk which always exists.  Note that unlike other
 * chunks, this one can be allocated and mapped in several different
 * ways and thus often doesn't live in the vmalloc area.
 */
struct pcpu_chunk *pcpu_first_chunk __ro_after_init;

/*
 * Optional reserved chunk.  This chunk reserves part of the first
 * chunk and serves it for reserved allocations.  When the reserved
 * region doesn't exist, the following variable is NULL.
 */
struct pcpu_chunk *pcpu_reserved_chunk __ro_after_init;

DEFINE_SPINLOCK(pcpu_lock);	/* all internal data structures */
static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop, map ext */

struct list_head *pcpu_slot __ro_after_init; /* chunk list slots */

/* chunks which need their map areas extended, protected by pcpu_lock */
static LIST_HEAD(pcpu_map_extend_chunks);

/*
 * The number of empty populated pages, protected by pcpu_lock.  The
 * reserved chunk doesn't contribute to the count.
 */
int pcpu_nr_empty_pop_pages;

/*
 * The number of populated pages in use by the allocator, protected by
 * pcpu_lock.  This number is kept per unit per chunk (i.e. when a page gets
 * allocated/deallocated, it is allocated/deallocated in all units of a chunk
 * and increments/decrements this count by 1).
 */
static unsigned long pcpu_nr_populated;

/*
 * Balance work is used to populate or destroy chunks asynchronously.  We
 * try to keep the number of populated free pages between
 * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations and at most one
 * empty chunk.
 */
static void pcpu_balance_workfn(struct work_struct *work);
static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
static bool pcpu_async_enabled __read_mostly;
static bool pcpu_atomic_alloc_failed;

static void pcpu_schedule_balance_work(void)
{
	if (pcpu_async_enabled)
		schedule_work(&pcpu_balance_work);
}

/**
 * pcpu_addr_in_chunk - check if the address is served from this chunk
 * @chunk: chunk of interest
 * @addr: percpu address
 *
 * RETURNS:
 * True if the address is served from this chunk.
 */
static bool pcpu_addr_in_chunk(struct pcpu_chunk *chunk, void *addr)
{
	void *start_addr, *end_addr;

	if (!chunk)
		return false;

	start_addr = chunk->base_addr + chunk->start_offset;
	end_addr = chunk->base_addr + chunk->nr_pages * PAGE_SIZE -
		   chunk->end_offset;

	return addr >= start_addr && addr < end_addr;
}

static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}

static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_nr_slots - 1;
	return __pcpu_size_to_slot(size);
}

static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	const struct pcpu_block_md *chunk_md = &chunk->chunk_md;

	if (chunk->free_bytes < PCPU_MIN_ALLOC_SIZE ||
	    chunk_md->contig_hint == 0)
		return 0;

	return pcpu_size_to_slot(chunk_md->contig_hint * PCPU_MIN_ALLOC_SIZE);
}

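/*
 * Worked example of the slot mapping above, assuming
 * PCPU_MIN_ALLOC_SIZE == 4: a chunk whose contig_hint is 256 bits
 * represents 1024 free bytes, fls(1024) == 11, so it lands in slot
 * max(11 - 5 + 2, 1) == 8.  A completely free chunk
 * (free_bytes == pcpu_unit_size) is always placed in the last slot,
 * pcpu_nr_slots - 1.
 */
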
/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
	page->index = (unsigned long)pcpu;
}

/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
	return (struct pcpu_chunk *)page->index;
}

static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
}

static unsigned long pcpu_unit_page_offset(unsigned int cpu, int page_idx)
{
	return pcpu_unit_offsets[cpu] + (page_idx << PAGE_SHIFT);
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->base_addr +
	       pcpu_unit_page_offset(cpu, page_idx);
}

static void pcpu_next_unpop(unsigned long *bitmap, int *rs, int *re, int end)
{
	*rs = find_next_zero_bit(bitmap, end, *rs);
	*re = find_next_bit(bitmap, end, *rs + 1);
}

static void pcpu_next_pop(unsigned long *bitmap, int *rs, int *re, int end)
{
	*rs = find_next_bit(bitmap, end, *rs);
	*re = find_next_zero_bit(bitmap, end, *rs + 1);
}

/*
 * Bitmap region iterators.  Iterates over the bitmap between
 * [@start, @end) in @chunk.  @rs and @re should be integer variables
 * and will be set to start and end index of the current free region.
 */
#define pcpu_for_each_unpop_region(bitmap, rs, re, start, end)		     \
	for ((rs) = (start), pcpu_next_unpop((bitmap), &(rs), &(re), (end)); \
	     (rs) < (re);						     \
	     (rs) = (re) + 1, pcpu_next_unpop((bitmap), &(rs), &(re), (end)))

#define pcpu_for_each_pop_region(bitmap, rs, re, start, end)		     \
	for ((rs) = (start), pcpu_next_pop((bitmap), &(rs), &(re), (end));   \
	     (rs) < (re);						     \
	     (rs) = (re) + 1, pcpu_next_pop((bitmap), &(rs), &(re), (end)))

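/*
 * Usage sketch for the iterators above: walking the unpopulated page
 * runs of a chunk, as the population path does.  rs/re receive the
 * [start, end) page indexes of each run:
 *
 *	int rs, re;
 *
 *	pcpu_for_each_unpop_region(chunk->populated, rs, re, 0,
 *				   chunk->nr_pages) {
 *		(populate pages [rs, re) here)
 *	}
 */
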
/*
 * The following are helper functions to help access bitmaps and convert
 * between bitmap offsets and address offsets.
 */
static unsigned long *pcpu_index_alloc_map(struct pcpu_chunk *chunk, int index)
{
	return chunk->alloc_map +
	       (index * PCPU_BITMAP_BLOCK_BITS / BITS_PER_LONG);
}

static unsigned long pcpu_off_to_block_index(int off)
{
	return off / PCPU_BITMAP_BLOCK_BITS;
}

static unsigned long pcpu_off_to_block_off(int off)
{
	return off & (PCPU_BITMAP_BLOCK_BITS - 1);
}

static unsigned long pcpu_block_off_to_off(int index, int off)
{
	return index * PCPU_BITMAP_BLOCK_BITS + off;
}

/*
 * pcpu_next_hint - determine which hint to use
 * @block: block of interest
 * @alloc_bits: size of allocation
 *
 * This determines if we should scan based on the scan_hint or first_free.
 * In general, we want to scan from first_free to fulfill allocations by
 * first fit.  However, if we know a scan_hint at position scan_hint_start
 * cannot fulfill an allocation, we can begin scanning from there knowing
 * the contig_hint will be our fallback.
 */
static int pcpu_next_hint(struct pcpu_block_md *block, int alloc_bits)
{
	/*
	 * The three conditions below determine if we can skip past the
	 * scan_hint.  First, does the scan hint exist.  Second, is the
	 * contig_hint after the scan_hint (possibly not true iff
	 * contig_hint == scan_hint).  Third, is the allocation request
	 * larger than the scan_hint.
	 */
	if (block->scan_hint &&
	    block->contig_hint_start > block->scan_hint_start &&
	    alloc_bits > block->scan_hint)
		return block->scan_hint_start + block->scan_hint;

	return block->first_free;
}

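/*
 * Worked example: with scan_hint == 16 at scan_hint_start == 64 and
 * contig_hint_start == 192, a request of alloc_bits == 32 cannot fit in
 * the scan hint (32 > 16), so scanning begins at 64 + 16 == 80 with the
 * contig_hint as fallback.  A request of alloc_bits <= 16 instead starts
 * from block->first_free.
 */
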
/**
 * pcpu_next_md_free_region - finds the next hint free area
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * Helper function for pcpu_for_each_md_free_region.  It checks
 * block->contig_hint and performs aggregation across blocks to find the
 * next hint.  It modifies bit_off and bits in-place to be consumed in the
 * loop.
 */
static void pcpu_next_md_free_region(struct pcpu_chunk *chunk, int *bit_off,
				     int *bits)
{
	int i = pcpu_off_to_block_index(*bit_off);
	int block_off = pcpu_off_to_block_off(*bit_off);
	struct pcpu_block_md *block;

	*bits = 0;
	for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
	     block++, i++) {
		/* handles contig area across blocks */
		if (*bits) {
			*bits += block->left_free;
			if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
				continue;
			return;
		}

		/*
		 * This checks three things.  First is there a contig_hint to
		 * check.  Second, have we checked this hint before by
		 * comparing the block_off.  Third, is this the same as the
		 * right contig hint.  In the last case, it spills over into
		 * the next block and should be handled by the contig area
		 * across blocks code.
		 */
		*bits = block->contig_hint;
		if (*bits && block->contig_hint_start >= block_off &&
		    *bits + block->contig_hint_start < PCPU_BITMAP_BLOCK_BITS) {
			*bit_off = pcpu_block_off_to_off(i,
					block->contig_hint_start);
			return;
		}
		/* reset to satisfy the second predicate above */
		block_off = 0;

		*bits = block->right_free;
		*bit_off = (i + 1) * PCPU_BITMAP_BLOCK_BITS - block->right_free;
	}
}

/**
 * pcpu_next_fit_region - finds fit areas for a given allocation request
 * @chunk: chunk of interest
 * @alloc_bits: size of allocation
 * @align: alignment of area (max PAGE_SIZE)
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * Finds the next free region that is viable for use with a given size and
 * alignment.  This only returns if there is a valid area to be used for this
 * allocation.  block->first_free is returned if the allocation request fits
 * within the block to see if the request can be fulfilled prior to the contig
 * hint.
 */
static void pcpu_next_fit_region(struct pcpu_chunk *chunk, int alloc_bits,
				 int align, int *bit_off, int *bits)
{
	int i = pcpu_off_to_block_index(*bit_off);
	int block_off = pcpu_off_to_block_off(*bit_off);
	struct pcpu_block_md *block;

	*bits = 0;
	for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
	     block++, i++) {
		/* handles contig area across blocks */
		if (*bits) {
			*bits += block->left_free;
			if (*bits >= alloc_bits)
				return;
			if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
				continue;
		}

		/* check block->contig_hint */
		*bits = ALIGN(block->contig_hint_start, align) -
			block->contig_hint_start;
		/*
		 * This uses the block offset to determine if this has been
		 * checked in the prior iteration.
		 */
		if (block->contig_hint &&
		    block->contig_hint_start >= block_off &&
		    block->contig_hint >= *bits + alloc_bits) {
			int start = pcpu_next_hint(block, alloc_bits);

			*bits += alloc_bits + block->contig_hint_start -
				 start;
			*bit_off = pcpu_block_off_to_off(i, start);
			return;
		}
		/* reset to satisfy the second predicate above */
		block_off = 0;

		*bit_off = ALIGN(PCPU_BITMAP_BLOCK_BITS - block->right_free,
				 align);
		*bits = PCPU_BITMAP_BLOCK_BITS - *bit_off;
		*bit_off = pcpu_block_off_to_off(i, *bit_off);
		if (*bits >= alloc_bits)
			return;
	}

	/* no valid offsets were found - fail condition */
	*bit_off = pcpu_chunk_map_bits(chunk);
}

/*
 * Metadata free area iterators.  These perform aggregation of free areas
 * based on the metadata blocks and return the offset @bit_off and size in
 * bits of the free area @bits.  pcpu_for_each_fit_region only returns when
 * a fit is found for the allocation request.
 */
#define pcpu_for_each_md_free_region(chunk, bit_off, bits)		\
	for (pcpu_next_md_free_region((chunk), &(bit_off), &(bits));	\
	     (bit_off) < pcpu_chunk_map_bits((chunk));			\
	     (bit_off) += (bits) + 1,					\
	     pcpu_next_md_free_region((chunk), &(bit_off), &(bits)))

#define pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits)     \
	for (pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
				  &(bits));				      \
	     (bit_off) < pcpu_chunk_map_bits((chunk));			      \
	     (bit_off) += (bits),					      \
	     pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
				  &(bits)))

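/*
 * Usage sketch: pcpu_find_block_fit() below drives pcpu_for_each_fit_region
 * with the allocation spec and stops at the first candidate whose backing
 * pages satisfy the population requirement:
 *
 *	pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) {
 *		if (!pop_only || pcpu_is_populated(chunk, bit_off, bits,
 *						   &next_off))
 *			break;
 *		bit_off = next_off;
 *		bits = 0;
 *	}
 */
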
/**
 * pcpu_mem_zalloc - allocate memory
 * @size: bytes to allocate
 * @gfp: allocation flags
 *
 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
 * kzalloc() is used; otherwise, the equivalent of vzalloc() is used.
 * This is to facilitate passing through whitelisted flags.  The
 * returned memory is always zeroed.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_zalloc(size_t size, gfp_t gfp)
{
	if (WARN_ON_ONCE(!slab_is_available()))
		return NULL;

	if (size <= PAGE_SIZE)
		return kzalloc(size, gfp);
	else
		return __vmalloc(size, gfp | __GFP_ZERO, PAGE_KERNEL);
}

/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 *
 * Free @ptr.  @ptr should have been allocated using pcpu_mem_zalloc().
 */
static void pcpu_mem_free(void *ptr)
{
	kvfree(ptr);
}

static void __pcpu_chunk_move(struct pcpu_chunk *chunk, int slot,
			      bool move_front)
{
	if (chunk != pcpu_reserved_chunk) {
		if (move_front)
			list_move(&chunk->list, &pcpu_slot[slot]);
		else
			list_move_tail(&chunk->list, &pcpu_slot[slot]);
	}
}

static void pcpu_chunk_move(struct pcpu_chunk *chunk, int slot)
{
	__pcpu_chunk_move(chunk, slot, true);
}

/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot.  Note that the reserved chunk is never put on
 * chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	if (oslot != nslot)
		__pcpu_chunk_move(chunk, nslot, oslot < nslot);
}

/*
 * pcpu_update_empty_pages - update empty page counters
 * @chunk: chunk of interest
 * @nr: nr of empty pages
 *
 * This is used to keep track of the empty pages now based on the premise
 * a md_block covers a page.  The hint update functions recognize if a block
 * is made full or broken to calculate deltas for keeping track of free pages.
 */
static inline void pcpu_update_empty_pages(struct pcpu_chunk *chunk, int nr)
{
	chunk->nr_empty_pop_pages += nr;
	if (chunk != pcpu_reserved_chunk)
		pcpu_nr_empty_pop_pages += nr;
}

/*
 * pcpu_region_overlap - determines if two regions overlap
 * @a: start of first region, inclusive
 * @b: end of first region, exclusive
 * @x: start of second region, inclusive
 * @y: end of second region, exclusive
 *
 * This is used to determine if the hint region [a, b) overlaps with the
 * allocated region [x, y).
 */
static inline bool pcpu_region_overlap(int a, int b, int x, int y)
{
	return (a < y) && (x < b);
}

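/*
 * Example: pcpu_region_overlap(0, 8, 8, 12) is false because the regions
 * only touch at 8, which is exclusive for the first region, while
 * pcpu_region_overlap(0, 8, 7, 12) is true.
 */
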
/**
 * pcpu_block_update - updates a block given a free area
 * @block: block of interest
 * @start: start offset in block
 * @end: end offset in block
 *
 * Updates a block given a known free area.  The region [start, end) is
 * expected to be the entirety of the free area within a block.  Chooses
 * the best starting offset if the contig hints are equal.
 */
static void pcpu_block_update(struct pcpu_block_md *block, int start, int end)
{
	int contig = end - start;

	block->first_free = min(block->first_free, start);
	if (start == 0)
		block->left_free = contig;

	if (end == block->nr_bits)
		block->right_free = contig;

	if (contig > block->contig_hint) {
		/* promote the old contig_hint to be the new scan_hint */
		if (start > block->contig_hint_start) {
			if (block->contig_hint > block->scan_hint) {
				block->scan_hint_start =
					block->contig_hint_start;
				block->scan_hint = block->contig_hint;
			} else if (start < block->scan_hint_start) {
				/*
				 * The old contig_hint == scan_hint.  But, the
				 * new contig is larger so hold the invariant
				 * scan_hint_start < contig_hint_start.
				 */
				block->scan_hint = 0;
			}
		} else {
			block->scan_hint = 0;
		}
		block->contig_hint_start = start;
		block->contig_hint = contig;
	} else if (contig == block->contig_hint) {
		if (block->contig_hint_start &&
		    (!start ||
		     __ffs(start) > __ffs(block->contig_hint_start))) {
			/* start has a better alignment so use it */
			block->contig_hint_start = start;
			if (start < block->scan_hint_start &&
			    block->contig_hint > block->scan_hint)
				block->scan_hint = 0;
		} else if (start > block->scan_hint_start ||
			   block->contig_hint > block->scan_hint) {
			/*
			 * Knowing contig == contig_hint, update the scan_hint
			 * if it is farther than or larger than the current
			 * scan_hint.
			 */
			block->scan_hint_start = start;
			block->scan_hint = contig;
		}
	} else {
		/*
		 * The region is smaller than the contig_hint.  So only update
		 * the scan_hint if it is larger than or equal and farther than
		 * the current scan_hint.
		 */
		if ((start < block->contig_hint_start &&
		     (contig > block->scan_hint ||
		      (contig == block->scan_hint &&
		       start > block->scan_hint_start)))) {
			block->scan_hint_start = start;
			block->scan_hint = contig;
		}
	}
}

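/*
 * Worked example of the promotion above: a block with contig_hint == 5 at
 * contig_hint_start == 10 and no scan_hint sees a new free area [20, 30).
 * The new contig (10 bits) is larger and starts after the old hint, so the
 * old hint is demoted to the scan_hint (5 @ 10) and the contig_hint becomes
 * 10 @ 20, preserving scan_hint_start < contig_hint_start.
 */
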
/*
 * pcpu_block_update_scan - update a block given a free area from a scan
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * Finding the final allocation spot first goes through pcpu_find_block_fit()
 * to find a block that can hold the allocation and then pcpu_alloc_area()
 * where a scan is used.  When allocations require specific alignments,
 * we can inadvertently create holes which will not be seen in the alloc
 * or free paths.
 *
 * This takes a given free area hole and updates a block as it may change the
 * scan_hint.  We need to scan backwards to ensure we don't miss free bits
 * from alignment.
 */
static void pcpu_block_update_scan(struct pcpu_chunk *chunk, int bit_off,
				   int bits)
{
	int s_off = pcpu_off_to_block_off(bit_off);
	int e_off = s_off + bits;
	int s_index, l_bit;
	struct pcpu_block_md *block;

	if (e_off > PCPU_BITMAP_BLOCK_BITS)
		return;

	s_index = pcpu_off_to_block_index(bit_off);
	block = chunk->md_blocks + s_index;

	/* scan backwards in case of alignment skipping free bits */
	l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index), s_off);
	s_off = (s_off == l_bit) ? 0 : l_bit + 1;

	pcpu_block_update(block, s_off, e_off);
}

/**
 * pcpu_chunk_refresh_hint - updates metadata about a chunk
 * @chunk: chunk of interest
 * @full_scan: if we should scan from the beginning
 *
 * Iterates over the metadata blocks to find the largest contig area.
 * A full scan can be avoided on the allocation path as this is triggered
 * if we broke the contig_hint.  In doing so, the scan_hint will be before
 * the contig_hint or after if the scan_hint == contig_hint.  This cannot
 * be prevented on freeing as we want to find the largest area possibly
 * spanning blocks.
 */
static void pcpu_chunk_refresh_hint(struct pcpu_chunk *chunk, bool full_scan)
{
	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
	int bit_off, bits;

	/* promote scan_hint to contig_hint */
	if (!full_scan && chunk_md->scan_hint) {
		bit_off = chunk_md->scan_hint_start + chunk_md->scan_hint;
		chunk_md->contig_hint_start = chunk_md->scan_hint_start;
		chunk_md->contig_hint = chunk_md->scan_hint;
		chunk_md->scan_hint = 0;
	} else {
		bit_off = chunk_md->first_free;
		chunk_md->contig_hint = 0;
	}

	bits = 0;
	pcpu_for_each_md_free_region(chunk, bit_off, bits) {
		pcpu_block_update(chunk_md, bit_off, bit_off + bits);
	}
}

/**
 * pcpu_block_refresh_hint
 * @chunk: chunk of interest
 * @index: index of the metadata block
 *
 * Scans over the block beginning at first_free and updates the block
 * metadata accordingly.
 */
static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index)
{
	struct pcpu_block_md *block = chunk->md_blocks + index;
	unsigned long *alloc_map = pcpu_index_alloc_map(chunk, index);
	int rs, re, start;	/* region start, region end */

	/* promote scan_hint to contig_hint */
	if (block->scan_hint) {
		start = block->scan_hint_start + block->scan_hint;
		block->contig_hint_start = block->scan_hint_start;
		block->contig_hint = block->scan_hint;
		block->scan_hint = 0;
	} else {
		start = block->first_free;
		block->contig_hint = 0;
	}

	block->right_free = 0;

	/* iterate over free areas and update the contig hints */
	pcpu_for_each_unpop_region(alloc_map, rs, re, start,
				   PCPU_BITMAP_BLOCK_BITS) {
		pcpu_block_update(block, rs, re);
	}
}

/**
 * pcpu_block_update_hint_alloc - update hint on allocation path
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of request
 *
 * Updates metadata for the allocation path.  The metadata only has to be
 * refreshed by a full scan iff the chunk's contig hint is broken.  Block level
 * scans are required if the block's contig hint is broken.
 */
static void pcpu_block_update_hint_alloc(struct pcpu_chunk *chunk, int bit_off,
					 int bits)
{
	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
	int nr_empty_pages = 0;
	struct pcpu_block_md *s_block, *e_block, *block;
	int s_index, e_index;	/* block indexes of the freed allocation */
	int s_off, e_off;	/* block offsets of the freed allocation */

	/*
	 * Calculate per block offsets.
	 * The calculation uses an inclusive range, but the resulting offsets
	 * are [start, end).  e_index always points to the last block in the
	 * range.
	 */
	s_index = pcpu_off_to_block_index(bit_off);
	e_index = pcpu_off_to_block_index(bit_off + bits - 1);
	s_off = pcpu_off_to_block_off(bit_off);
	e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;

	s_block = chunk->md_blocks + s_index;
	e_block = chunk->md_blocks + e_index;

	/*
	 * Update s_block.
	 * block->first_free must be updated if the allocation takes its place.
	 * If the allocation breaks the contig_hint, a scan is required to
	 * restore this hint.
	 */
	if (s_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
		nr_empty_pages++;

	if (s_off == s_block->first_free)
		s_block->first_free = find_next_zero_bit(
					pcpu_index_alloc_map(chunk, s_index),
					PCPU_BITMAP_BLOCK_BITS,
					s_off + bits);

	if (pcpu_region_overlap(s_block->scan_hint_start,
				s_block->scan_hint_start + s_block->scan_hint,
				s_off,
				s_off + bits))
		s_block->scan_hint = 0;

	if (pcpu_region_overlap(s_block->contig_hint_start,
				s_block->contig_hint_start +
				s_block->contig_hint,
				s_off,
				s_off + bits)) {
		/* block contig hint is broken - scan to fix it */
		if (!s_off)
			s_block->left_free = 0;
		pcpu_block_refresh_hint(chunk, s_index);
	} else {
		/* update left and right contig manually */
		s_block->left_free = min(s_block->left_free, s_off);
		if (s_index == e_index)
			s_block->right_free = min_t(int, s_block->right_free,
					PCPU_BITMAP_BLOCK_BITS - e_off);
		else
			s_block->right_free = 0;
	}

	/*
	 * Update e_block.
	 */
	if (s_index != e_index) {
		if (e_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
			nr_empty_pages++;

		/*
		 * When the allocation is across blocks, the end is along
		 * the left part of the e_block.
		 */
		e_block->first_free = find_next_zero_bit(
				pcpu_index_alloc_map(chunk, e_index),
				PCPU_BITMAP_BLOCK_BITS, e_off);

		if (e_off == PCPU_BITMAP_BLOCK_BITS) {
			/* reset the block */
			e_block++;
		} else {
			if (e_off > e_block->scan_hint_start)
				e_block->scan_hint = 0;

			e_block->left_free = 0;
			if (e_off > e_block->contig_hint_start) {
				/* contig hint is broken - scan to fix it */
				pcpu_block_refresh_hint(chunk, e_index);
			} else {
				e_block->right_free =
					min_t(int, e_block->right_free,
					      PCPU_BITMAP_BLOCK_BITS - e_off);
			}
		}

		/* update in-between md_blocks */
		nr_empty_pages += (e_index - s_index - 1);
		for (block = s_block + 1; block < e_block; block++) {
			block->scan_hint = 0;
			block->contig_hint = 0;
			block->left_free = 0;
			block->right_free = 0;
		}
	}

	if (nr_empty_pages)
		pcpu_update_empty_pages(chunk, -nr_empty_pages);

	if (pcpu_region_overlap(chunk_md->scan_hint_start,
				chunk_md->scan_hint_start +
				chunk_md->scan_hint,
				bit_off,
				bit_off + bits))
		chunk_md->scan_hint = 0;

	/*
	 * The only time a full chunk scan is required is if the chunk
	 * contig hint is broken.  Otherwise, it means a smaller space
	 * was used and therefore the chunk contig hint is still correct.
	 */
	if (pcpu_region_overlap(chunk_md->contig_hint_start,
				chunk_md->contig_hint_start +
				chunk_md->contig_hint,
				bit_off,
				bit_off + bits))
		pcpu_chunk_refresh_hint(chunk, false);
}

/**
 * pcpu_block_update_hint_free - updates the block hints on the free path
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of request
 *
 * Updates metadata for the free path.  This avoids a blind block
 * refresh by making use of the block contig hints.  If this fails, it scans
 * forward and backward to determine the extent of the free area.  This is
 * capped at the boundary of blocks.
 *
 * A chunk update is triggered if a page becomes free, a block becomes free,
 * or the free spans across blocks.  This tradeoff is to minimize iterating
 * over the block metadata to update chunk_md->contig_hint.
 * chunk_md->contig_hint may be off by up to a page, but it will never be more
 * than the available space.  If the contig hint is contained in one block, it
 * will be accurate.
 */
static void pcpu_block_update_hint_free(struct pcpu_chunk *chunk, int bit_off,
					int bits)
{
	int nr_empty_pages = 0;
	struct pcpu_block_md *s_block, *e_block, *block;
	int s_index, e_index;	/* block indexes of the freed allocation */
	int s_off, e_off;	/* block offsets of the freed allocation */
	int start, end;		/* start and end of the whole free area */

	/*
	 * Calculate per block offsets.
	 * The calculation uses an inclusive range, but the resulting offsets
	 * are [start, end).  e_index always points to the last block in the
	 * range.
	 */
	s_index = pcpu_off_to_block_index(bit_off);
	e_index = pcpu_off_to_block_index(bit_off + bits - 1);
	s_off = pcpu_off_to_block_off(bit_off);
	e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;

	s_block = chunk->md_blocks + s_index;
	e_block = chunk->md_blocks + e_index;

	/*
	 * Check if the freed area aligns with the block->contig_hint.
	 * If it does, then the scan to find the beginning/end of the
	 * larger free area can be avoided.
	 *
	 * start and end refer to beginning and end of the free area
	 * within each their respective blocks.  This is not necessarily
	 * the entire free area as it may span blocks past the beginning
	 * or end of the block.
	 */
	start = s_off;
	if (s_off == s_block->contig_hint + s_block->contig_hint_start) {
		start = s_block->contig_hint_start;
	} else {
		/*
		 * Scan backwards to find the extent of the free area.
		 * find_last_bit returns the starting bit, so if the start bit
		 * is returned, that means there was no last bit and the
		 * remainder of the chunk is free.
		 */
		int l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index),
					  start);
		start = (start == l_bit) ? 0 : l_bit + 1;
	}

	end = e_off;
	if (e_off == e_block->contig_hint_start)
		end = e_block->contig_hint_start + e_block->contig_hint;
	else
		end = find_next_bit(pcpu_index_alloc_map(chunk, e_index),
				    PCPU_BITMAP_BLOCK_BITS, end);

	/* update s_block */
	e_off = (s_index == e_index) ? end : PCPU_BITMAP_BLOCK_BITS;
	if (!start && e_off == PCPU_BITMAP_BLOCK_BITS)
		nr_empty_pages++;
	pcpu_block_update(s_block, start, e_off);

	/* the free spans blocks - update e_block and the blocks in between */
	if (s_index != e_index) {
		/* update e_block */
		if (end == PCPU_BITMAP_BLOCK_BITS)
			nr_empty_pages++;
		pcpu_block_update(e_block, 0, end);

		/* reset md_blocks in the middle */
		nr_empty_pages += (e_index - s_index - 1);
		for (block = s_block + 1; block < e_block; block++) {
			block->first_free = 0;
			block->scan_hint = 0;
			block->contig_hint_start = 0;
			block->contig_hint = PCPU_BITMAP_BLOCK_BITS;
			block->left_free = PCPU_BITMAP_BLOCK_BITS;
			block->right_free = PCPU_BITMAP_BLOCK_BITS;
		}
	}

	if (nr_empty_pages)
		pcpu_update_empty_pages(chunk, nr_empty_pages);

	/*
	 * Refresh chunk metadata when the free makes a block free or spans
	 * across blocks.  The contig_hint may be off by up to a page, but if
	 * the contig_hint is contained in a block, it will be accurate with
	 * the else condition below.
	 */
	if (((end - start) >= PCPU_BITMAP_BLOCK_BITS) || s_index != e_index)
		pcpu_chunk_refresh_hint(chunk, true);
	else
		pcpu_block_update(&chunk->chunk_md,
				  pcpu_block_off_to_off(s_index, start),
				  end);
}

/**
 * pcpu_is_populated - determines if the region is populated
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of area
 * @next_off: return value for the next offset to start searching
 *
 * For atomic allocations, check if the backing pages are populated.
 *
 * RETURNS:
 * Bool if the backing pages are populated.
 * next_off is to skip over unpopulated blocks in pcpu_find_block_fit.
 */
static bool pcpu_is_populated(struct pcpu_chunk *chunk, int bit_off, int bits,
			      int *next_off)
{
	int page_start, page_end, rs, re;

	page_start = PFN_DOWN(bit_off * PCPU_MIN_ALLOC_SIZE);
	page_end = PFN_UP((bit_off + bits) * PCPU_MIN_ALLOC_SIZE);

	rs = page_start;
	pcpu_next_unpop(chunk->populated, &rs, &re, page_end);
	if (rs >= page_end)
		return true;

	*next_off = re * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
	return false;
}

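/*
 * Worked example, assuming PAGE_SIZE == 4096 and PCPU_MIN_ALLOC_SIZE == 4
 * (1024 bits per page): bit_off == 1000 and bits == 100 cover the byte
 * range [4000, 4400), so page_start == 0 and page_end == 2; both pages
 * must be populated for the area to serve an atomic allocation.
 */
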
/**
 * pcpu_find_block_fit - finds the block index to start searching
 * @chunk: chunk of interest
 * @alloc_bits: size of request in allocation units
 * @align: alignment of area (max PAGE_SIZE bytes)
 * @pop_only: use populated regions only
 *
 * Given a chunk and an allocation spec, find the offset to begin searching
 * for a free region.  This iterates over the bitmap metadata blocks to
 * find an offset that will be guaranteed to fit the requirements.  It is
 * not quite first fit as if the allocation does not fit in the contig hint
 * of a block or chunk, it is skipped.  This errs on the side of caution
 * to prevent excess iteration.  Poor alignment can cause the allocator to
 * skip over blocks and chunks that have valid free areas.
 *
 * RETURNS:
 * The offset in the bitmap to begin searching.
 * -1 if no offset is found.
 */
static int pcpu_find_block_fit(struct pcpu_chunk *chunk, int alloc_bits,
			       size_t align, bool pop_only)
{
	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
	int bit_off, bits, next_off;

	/*
	 * Check to see if the allocation can fit in the chunk's contig hint.
	 * This is an optimization to prevent scanning by assuming if it
	 * cannot fit in the global hint, there is memory pressure and creating
	 * a new chunk would happen soon.
	 */
	bit_off = ALIGN(chunk_md->contig_hint_start, align) -
		  chunk_md->contig_hint_start;
	if (bit_off + alloc_bits > chunk_md->contig_hint)
		return -1;

	bit_off = pcpu_next_hint(chunk_md, alloc_bits);
	bits = 0;
	pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) {
		if (!pop_only || pcpu_is_populated(chunk, bit_off, bits,
						   &next_off))
			break;

		bit_off = next_off;
		bits = 0;
	}

	if (bit_off == pcpu_chunk_map_bits(chunk))
		return -1;

	return bit_off;
}

/*
 * pcpu_find_zero_area - modified from bitmap_find_next_zero_area_off()
 * @map: the address to base the search on
 * @size: the bitmap size in bits
 * @start: the bitnumber to start searching at
 * @nr: the number of zeroed bits we're looking for
 * @align_mask: alignment mask for zero area
 * @largest_off: offset of the largest area skipped
 * @largest_bits: size of the largest area skipped
 *
 * The @align_mask should be one less than a power of 2.
 *
 * This is a modified version of bitmap_find_next_zero_area_off() to remember
 * the largest area that was skipped.  This is imperfect, but in general is
 * good enough.  The largest remembered region is the largest failed region
 * seen.  This does not include anything we possibly skipped due to alignment.
 * pcpu_block_update_scan() does scan backwards to try and recover what was
 * lost to alignment.  While this can cause scanning to miss earlier possible
 * free areas, smaller allocations will eventually fill those holes.
 */
static unsigned long pcpu_find_zero_area(unsigned long *map,
					 unsigned long size,
					 unsigned long start,
					 unsigned long nr,
					 unsigned long align_mask,
					 unsigned long *largest_off,
					 unsigned long *largest_bits)
{
	unsigned long index, end, i, area_off, area_bits;
again:
	index = find_next_zero_bit(map, size, start);

	/* Align allocation */
	index = __ALIGN_MASK(index, align_mask);
	area_off = index;

	end = index + nr;
	if (end > size)
		return end;
	i = find_next_bit(map, end, index);
	if (i < end) {
		area_bits = i - area_off;
		/* remember largest unused area with best alignment */
		if (area_bits > *largest_bits ||
		    (area_bits == *largest_bits && *largest_off &&
		     (!area_off || __ffs(area_off) > __ffs(*largest_off)))) {
			*largest_off = area_off;
			*largest_bits = area_bits;
		}

		start = i + 1;
		goto again;
	}
	return index;
}

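/*
 * Worked example for pcpu_find_zero_area() above: with bits 0-2 and bit 6
 * set in @map, @nr == 4 and @align_mask == 3, the first zero run starts at
 * bit 3 and aligns up to 4, but bit 6 cuts it short.  The failed area
 * [4, 6) is remembered in @largest_off/@largest_bits and the search
 * restarts at bit 7, which aligns to 8 and succeeds (returning 8) if bits
 * 8-11 are clear.
 */
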
/**
 * pcpu_alloc_area - allocates an area from a pcpu_chunk
 * @chunk: chunk of interest
 * @alloc_bits: size of request in allocation units
 * @align: alignment of area (max PAGE_SIZE)
 * @start: bit_off to start searching
 *
 * This function takes in a @start offset to begin searching to fit an
 * allocation of @alloc_bits with alignment @align.  It needs to scan
 * the allocation map because if it fits within the block's contig hint,
 * @start will be block->first_free.  This is an attempt to fill the
 * allocation prior to breaking the contig hint.  The allocation and
 * boundary maps are updated accordingly if it confirms a valid
 * free area.
 *
 * RETURNS:
 * Allocated addr offset in @chunk on success.
 * -1 if no matching area is found.
 */
static int pcpu_alloc_area(struct pcpu_chunk *chunk, int alloc_bits,
			   size_t align, int start)
{
	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
	size_t align_mask = (align) ? (align - 1) : 0;
	unsigned long area_off = 0, area_bits = 0;
	int bit_off, end, oslot;

	lockdep_assert_held(&pcpu_lock);

	oslot = pcpu_chunk_slot(chunk);

	/*
	 * Search to find a fit.
	 */
	end = min_t(int, start + alloc_bits + PCPU_BITMAP_BLOCK_BITS,
		    pcpu_chunk_map_bits(chunk));
	bit_off = pcpu_find_zero_area(chunk->alloc_map, end, start, alloc_bits,
				      align_mask, &area_off, &area_bits);
	if (bit_off >= end)
		return -1;

	if (area_bits)
		pcpu_block_update_scan(chunk, area_off, area_bits);

	/* update alloc map */
	bitmap_set(chunk->alloc_map, bit_off, alloc_bits);

	/* update boundary map */
	set_bit(bit_off, chunk->bound_map);
	bitmap_clear(chunk->bound_map, bit_off + 1, alloc_bits - 1);
	set_bit(bit_off + alloc_bits, chunk->bound_map);

	chunk->free_bytes -= alloc_bits * PCPU_MIN_ALLOC_SIZE;

	/* update first free bit */
	if (bit_off == chunk_md->first_free)
		chunk_md->first_free = find_next_zero_bit(
					chunk->alloc_map,
					pcpu_chunk_map_bits(chunk),
					bit_off + alloc_bits);

	pcpu_block_update_hint_alloc(chunk, bit_off, alloc_bits);

	pcpu_chunk_relocate(chunk, oslot);

	return bit_off * PCPU_MIN_ALLOC_SIZE;
}

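/*
 * Example of the two maps after a successful pcpu_alloc_area() above: an
 * allocation of 3 bits at bit offset 5 sets alloc_map bits 5-7 and bound_map
 * bits 5 and 8 (clearing bound_map bits 6-7).  The free path then only needs
 * bound_map to rediscover the size: the next set bit after 5 is 8, giving
 * 8 - 5 == 3 bits to clear.
 */
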
1235 : : /**
1236 : : * pcpu_free_area - frees the corresponding offset
1237 : : * @chunk: chunk of interest
1238 : : * @off: addr offset into chunk
1239 : : *
1240 : : * This function determines the size of an allocation to free using
1241 : : * the boundary bitmap and clears the allocation map.
1242 : : */
1243 : 76446 : static void pcpu_free_area(struct pcpu_chunk *chunk, int off)
1244 : : {
1245 : : struct pcpu_block_md *chunk_md = &chunk->chunk_md;
1246 : : int bit_off, bits, end, oslot;
1247 : :
1248 : : lockdep_assert_held(&pcpu_lock);
1249 : : pcpu_stats_area_dealloc(chunk);
1250 : :
1251 : 76446 : oslot = pcpu_chunk_slot(chunk);
1252 : :
1253 : 76446 : bit_off = off / PCPU_MIN_ALLOC_SIZE;
1254 : :
1255 : : /* find end index */
1256 : 152892 : end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk),
1257 : : bit_off + 1);
1258 : 76446 : bits = end - bit_off;
1259 : 76446 : bitmap_clear(chunk->alloc_map, bit_off, bits);
1260 : :
1261 : : /* update metadata */
1262 : 76446 : chunk->free_bytes += bits * PCPU_MIN_ALLOC_SIZE;
1263 : :
1264 : : /* update first free bit */
1265 : 76446 : chunk_md->first_free = min(chunk_md->first_free, bit_off);
1266 : :
1267 : 76446 : pcpu_block_update_hint_free(chunk, bit_off, bits);
1268 : :
1269 : 76446 : pcpu_chunk_relocate(chunk, oslot);
1270 : 76446 : }
1271 : :
1272 : : static void pcpu_init_md_block(struct pcpu_block_md *block, int nr_bits)
1273 : : {
1274 : 6624 : block->scan_hint = 0;
1275 : 6624 : block->contig_hint = nr_bits;
1276 : 6624 : block->left_free = nr_bits;
1277 : 6624 : block->right_free = nr_bits;
1278 : 6624 : block->first_free = 0;
1279 : 6624 : block->nr_bits = nr_bits;
1280 : : }
1281 : :
1282 : 621 : static void pcpu_init_md_blocks(struct pcpu_chunk *chunk)
1283 : : {
1284 : : struct pcpu_block_md *md_block;
1285 : :
1286 : : /* init the chunk's block */
1287 : : pcpu_init_md_block(&chunk->chunk_md, pcpu_chunk_map_bits(chunk));
1288 : :
1289 [ + + ]: 7245 : for (md_block = chunk->md_blocks;
1290 : 13248 : md_block != chunk->md_blocks + pcpu_chunk_nr_blocks(chunk);
1291 : 6003 : md_block++)
1292 : : pcpu_init_md_block(md_block, PCPU_BITMAP_BLOCK_BITS);
1293 : 621 : }
1294 : :
1295 : : /**
1296 : : * pcpu_alloc_first_chunk - creates chunks that serve the first chunk
1297 : : * @tmp_addr: the start of the region served
1298 : : * @map_size: size of the region served
1299 : : *
1300 : : * This is responsible for creating the chunks that serve the first chunk. The
1301 : : * base_addr is page aligned down of @tmp_addr while the region end is page
1302 : : * aligned up. Offsets are kept track of to determine the region served. All
1303 : : * this is done to appease the bitmap allocator in avoiding partial blocks.
1304 : : *
1305 : : * RETURNS:
1306 : : * Chunk serving the region at @tmp_addr of @map_size.
1307 : : */
1308 : 414 : static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
1309 : : int map_size)
1310 : : {
1311 : : struct pcpu_chunk *chunk;
1312 : : unsigned long aligned_addr, lcm_align;
1313 : : int start_offset, offset_bits, region_size, region_bits;
1314 : : size_t alloc_size;
1315 : :
1316 : : /* region calculations */
1317 : 414 : aligned_addr = tmp_addr & PAGE_MASK;
1318 : :
1319 : 414 : start_offset = tmp_addr - aligned_addr;
1320 : :
1321 : : /*
1322 : : * Align the end of the region with the LCM of PAGE_SIZE and
1323 : : * PCPU_BITMAP_BLOCK_SIZE. One of these constants is a multiple of
1324 : : * the other.
1325 : : */
1326 : 414 : lcm_align = lcm(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE);
1327 : 414 : region_size = ALIGN(start_offset + map_size, lcm_align);
1328 : :
1329 : : /* allocate chunk */
1330 : 414 : alloc_size = sizeof(struct pcpu_chunk) +
1331 : 414 : BITS_TO_LONGS(region_size >> PAGE_SHIFT);
1332 : 414 : chunk = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1333 [ - + ]: 414 : if (!chunk)
1334 : 0 : panic("%s: Failed to allocate %zu bytes\n", __func__,
1335 : : alloc_size);
1336 : :
1337 : 414 : INIT_LIST_HEAD(&chunk->list);
1338 : :
1339 : 414 : chunk->base_addr = (void *)aligned_addr;
1340 : 414 : chunk->start_offset = start_offset;
1341 : 414 : chunk->end_offset = region_size - chunk->start_offset - map_size;
1342 : :
1343 : 414 : chunk->nr_pages = region_size >> PAGE_SHIFT;
1344 : : region_bits = pcpu_chunk_map_bits(chunk);
1345 : :
1346 : 414 : alloc_size = BITS_TO_LONGS(region_bits) * sizeof(chunk->alloc_map[0]);
1347 : 414 : chunk->alloc_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1348 [ - + ]: 414 : if (!chunk->alloc_map)
1349 : 0 : panic("%s: Failed to allocate %zu bytes\n", __func__,
1350 : : alloc_size);
1351 : :
1352 : 414 : alloc_size =
1353 : 414 : BITS_TO_LONGS(region_bits + 1) * sizeof(chunk->bound_map[0]);
1354 : 414 : chunk->bound_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1355 [ - + ]: 414 : if (!chunk->bound_map)
1356 : 0 : panic("%s: Failed to allocate %zu bytes\n", __func__,
1357 : : alloc_size);
1358 : :
1359 : 414 : alloc_size = pcpu_chunk_nr_blocks(chunk) * sizeof(chunk->md_blocks[0]);
1360 : 414 : chunk->md_blocks = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1361 [ - + ]: 414 : if (!chunk->md_blocks)
1362 : 0 : panic("%s: Failed to allocate %zu bytes\n", __func__,
1363 : : alloc_size);
1364 : :
1365 : 414 : pcpu_init_md_blocks(chunk);
1366 : :
1367 : : /* manage populated page bitmap */
1368 : 414 : chunk->immutable = true;
1369 : 414 : bitmap_fill(chunk->populated, chunk->nr_pages);
1370 : 414 : chunk->nr_populated = chunk->nr_pages;
1371 : 414 : chunk->nr_empty_pop_pages = chunk->nr_pages;
1372 : :
1373 : 414 : chunk->free_bytes = map_size;
1374 : :
1375 [ + - ]: 414 : if (chunk->start_offset) {
1376 : : /* hide the beginning of the bitmap */
1377 : 414 : offset_bits = chunk->start_offset / PCPU_MIN_ALLOC_SIZE;
1378 : 414 : bitmap_set(chunk->alloc_map, 0, offset_bits);
1379 : 414 : set_bit(0, chunk->bound_map);
1380 : 414 : set_bit(offset_bits, chunk->bound_map);
1381 : :
1382 : 414 : chunk->chunk_md.first_free = offset_bits;
1383 : :
1384 : 414 : pcpu_block_update_hint_alloc(chunk, 0, offset_bits);
1385 : : }
1386 : :
1387 [ + + ]: 414 : if (chunk->end_offset) {
1388 : : /* hide the end of the bitmap */
1389 : 207 : offset_bits = chunk->end_offset / PCPU_MIN_ALLOC_SIZE;
1390 : 414 : bitmap_set(chunk->alloc_map,
1391 : 207 : pcpu_chunk_map_bits(chunk) - offset_bits,
1392 : : offset_bits);
1393 : 207 : set_bit((start_offset + map_size) / PCPU_MIN_ALLOC_SIZE,
1394 : : chunk->bound_map);
1395 : 207 : set_bit(region_bits, chunk->bound_map);
1396 : :
1397 : 207 : pcpu_block_update_hint_alloc(chunk, pcpu_chunk_map_bits(chunk)
1398 : : - offset_bits, offset_bits);
1399 : : }
1400 : :
1401 : 414 : return chunk;
1402 : : }
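 : :
 : : /*
 : :  * A worked sizing example (illustrative numbers, assuming 4K pages,
 : :  * 64-bit longs and PCPU_MIN_ALLOC_SIZE == 4): for a two-page region,
 : :  * region_size = 8192, so the trailing populated[] bitmap needs
 : :  * BITS_TO_LONGS(2) * sizeof(unsigned long) = 8 bytes on top of
 : :  * sizeof(struct pcpu_chunk). region_bits = 8192 / 4 = 2048, giving an
 : :  * alloc_map of BITS_TO_LONGS(2048) * 8 = 256 bytes and a bound_map of
 : :  * BITS_TO_LONGS(2049) * 8 = 264 bytes.
 : :  */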
1403 : :
1404 : 207 : static struct pcpu_chunk *pcpu_alloc_chunk(gfp_t gfp)
1405 : : {
1406 : : struct pcpu_chunk *chunk;
1407 : : int region_bits;
1408 : :
1409 : 207 : chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size, gfp);
1410 [ + - ]: 207 : if (!chunk)
1411 : : return NULL;
1412 : :
1413 : 207 : INIT_LIST_HEAD(&chunk->list);
1414 : 207 : chunk->nr_pages = pcpu_unit_pages;
1415 : : region_bits = pcpu_chunk_map_bits(chunk);
1416 : :
1417 : 207 : chunk->alloc_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits) *
1418 : : sizeof(chunk->alloc_map[0]), gfp);
1419 [ + - ]: 207 : if (!chunk->alloc_map)
1420 : : goto alloc_map_fail;
1421 : :
1422 : 207 : chunk->bound_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits + 1) *
1423 : : sizeof(chunk->bound_map[0]), gfp);
1424 [ + - ]: 207 : if (!chunk->bound_map)
1425 : : goto bound_map_fail;
1426 : :
1427 : 207 : chunk->md_blocks = pcpu_mem_zalloc(pcpu_chunk_nr_blocks(chunk) *
1428 : : sizeof(chunk->md_blocks[0]), gfp);
1429 [ + - ]: 207 : if (!chunk->md_blocks)
1430 : : goto md_blocks_fail;
1431 : :
1432 : 207 : pcpu_init_md_blocks(chunk);
1433 : :
1434 : : /* init metadata */
1435 : 207 : chunk->free_bytes = chunk->nr_pages * PAGE_SIZE;
1436 : :
1437 : 207 : return chunk;
1438 : :
1439 : : md_blocks_fail:
1440 : 0 : pcpu_mem_free(chunk->bound_map);
1441 : : bound_map_fail:
1442 : 0 : pcpu_mem_free(chunk->alloc_map);
1443 : : alloc_map_fail:
1444 : : pcpu_mem_free(chunk);
1445 : :
1446 : 0 : return NULL;
1447 : : }
1448 : :
1449 : 0 : static void pcpu_free_chunk(struct pcpu_chunk *chunk)
1450 : : {
1451 [ # # ]: 0 : if (!chunk)
1452 : 0 : return;
1453 : 0 : pcpu_mem_free(chunk->md_blocks);
1454 : 0 : pcpu_mem_free(chunk->bound_map);
1455 : 0 : pcpu_mem_free(chunk->alloc_map);
1456 : : pcpu_mem_free(chunk);
1457 : : }
1458 : :
1459 : : /**
1460 : : * pcpu_chunk_populated - post-population bookkeeping
1461 : : * @chunk: pcpu_chunk which got populated
1462 : : * @page_start: the start page
1463 : : * @page_end: the end page
1464 : : *
1465 : : * Pages in [@page_start,@page_end) have been populated to @chunk. Update
1466 : : * the bookkeeping information accordingly. Must be called after each
1467 : : * successful population.
1468 : : *
1469 : : * The per-chunk and global counts of empty populated pages are updated
1470 : : * via pcpu_update_empty_pages().
1471 : : */
1472 : 414 : static void pcpu_chunk_populated(struct pcpu_chunk *chunk, int page_start,
1473 : : int page_end)
1474 : : {
1475 : 414 : int nr = page_end - page_start;
1476 : :
1477 : : lockdep_assert_held(&pcpu_lock);
1478 : :
1479 : 414 : bitmap_set(chunk->populated, page_start, nr);
1480 : 414 : chunk->nr_populated += nr;
1481 : 414 : pcpu_nr_populated += nr;
1482 : :
1483 : : pcpu_update_empty_pages(chunk, nr);
1484 : 414 : }
1485 : :
1486 : : /**
1487 : : * pcpu_chunk_depopulated - post-depopulation bookkeeping
1488 : : * @chunk: pcpu_chunk which got depopulated
1489 : : * @page_start: the start page
1490 : : * @page_end: the end page
1491 : : *
1492 : : * Pages in [@page_start,@page_end) have been depopulated from @chunk.
1493 : : * Update the bookkeeping information accordingly. Must be called after
1494 : : * each successful depopulation.
1495 : : */
1496 : 0 : static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk,
1497 : : int page_start, int page_end)
1498 : : {
1499 : 0 : int nr = page_end - page_start;
1500 : :
1501 : : lockdep_assert_held(&pcpu_lock);
1502 : :
1503 : 0 : bitmap_clear(chunk->populated, page_start, nr);
1504 : 0 : chunk->nr_populated -= nr;
1505 : 0 : pcpu_nr_populated -= nr;
1506 : :
1507 : : pcpu_update_empty_pages(chunk, -nr);
1508 : 0 : }
1509 : :
1510 : : /*
1511 : : * Chunk management implementation.
1512 : : *
1513 : : * To allow different implementations, chunk alloc/free and
1514 : : * [de]population are implemented in a separate file which is pulled
1515 : : * into this file and compiled together. The following functions
1516 : : * should be implemented.
1517 : : *
1518 : : * pcpu_populate_chunk - populate the specified range of a chunk
1519 : : * pcpu_depopulate_chunk - depopulate the specified range of a chunk
1520 : : * pcpu_create_chunk - create a new chunk
1521 : : * pcpu_destroy_chunk - destroy a chunk, always preceded by full depop
1522 : : * pcpu_addr_to_page - translate address to the corresponding struct page
1523 : : * pcpu_verify_alloc_info - check alloc_info is acceptable during init
1524 : : */
1525 : : static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
1526 : : int page_start, int page_end, gfp_t gfp);
1527 : : static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
1528 : : int page_start, int page_end);
1529 : : static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp);
1530 : : static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
1531 : : static struct page *pcpu_addr_to_page(void *addr);
1532 : : static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);
1533 : :
1534 : : #ifdef CONFIG_NEED_PER_CPU_KM
1535 : : #include "percpu-km.c"
1536 : : #else
1537 : : #include "percpu-vm.c"
1538 : : #endif
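 : :
 : : /*
 : :  * For orientation: percpu-km backs chunks with directly-mapped kernel
 : :  * pages (for setups without vmalloc remapping), while percpu-vm maps
 : :  * pages into the vmalloc area. A minimal sketch of the resulting
 : :  * pcpu_addr_to_page() implementations (paraphrased; the included files
 : :  * are authoritative):
 : :  *
 : :  *	static struct page *pcpu_addr_to_page(void *addr)
 : :  *	{
 : :  *		return virt_to_page(addr);	// percpu-km
 : :  *	}
 : :  *
 : :  *	static struct page *pcpu_addr_to_page(void *addr)
 : :  *	{
 : :  *		return vmalloc_to_page(addr);	// percpu-vm
 : :  *	}
 : :  */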
1539 : :
1540 : : /**
1541 : : * pcpu_chunk_addr_search - determine chunk containing specified address
1542 : : * @addr: address for which the chunk needs to be determined.
1543 : : *
1544 : : * This is an internal function that handles all but static allocations.
1545 : : * Static percpu address values should never be passed into the allocator.
1546 : : *
1547 : : * RETURNS:
1548 : : * The address of the found chunk.
1549 : : */
1550 : 76446 : static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
1551 : : {
1552 : : /* is it in the dynamic region (first chunk)? */
1553 [ + + ]: 152892 : if (pcpu_addr_in_chunk(pcpu_first_chunk, addr))
1554 : : return pcpu_first_chunk;
1555 : :
1556 : : /* is it in the reserved region? */
1557 [ + - ]: 150822 : if (pcpu_addr_in_chunk(pcpu_reserved_chunk, addr))
1558 : : return pcpu_reserved_chunk;
1559 : :
1560 : : /*
1561 : : * The address is relative to unit0 which might be unused and
1562 : : * thus unmapped. Offset the address to the unit space of the
1563 : : * current processor before looking it up in the vmalloc
1564 : : * space. Note that any possible cpu id can be used here, so
1565 : : * there's no need to worry about preemption or cpu hotplug.
1566 : : */
1567 : 150822 : addr += pcpu_unit_offsets[raw_smp_processor_id()];
1568 : 75411 : return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
1569 : : }
1570 : :
1571 : : /**
1572 : : * pcpu_alloc - the percpu allocator
1573 : : * @size: size of area to allocate in bytes
1574 : : * @align: alignment of area (max PAGE_SIZE)
1575 : : * @reserved: allocate from the reserved chunk if available
1576 : : * @gfp: allocation flags
1577 : : *
1578 : : * Allocate percpu area of @size bytes aligned at @align. If @gfp doesn't
1579 : : * contain %GFP_KERNEL, the allocation is atomic. If @gfp has __GFP_NOWARN
1580 : : * then no warning will be triggered on invalid or failed allocation
1581 : : * requests.
1582 : : *
1583 : : * RETURNS:
1584 : : * Percpu pointer to the allocated area on success, NULL on failure.
1585 : : */
1586 : 438883 : static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
1587 : : gfp_t gfp)
1588 : : {
1589 : : /* whitelisted flags that can be passed to the backing allocators */
1590 : 438883 : gfp_t pcpu_gfp = gfp & (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
1591 : 438883 : bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
1592 : 438883 : bool do_warn = !(gfp & __GFP_NOWARN);
1593 : : static int warn_limit = 10;
1594 : : struct pcpu_chunk *chunk, *next;
1595 : : const char *err;
1596 : : int slot, off, cpu, ret;
1597 : : unsigned long flags;
1598 : : void __percpu *ptr;
1599 : : size_t bits, bit_align;
1600 : :
1601 : : /*
1602 : : * There is now a minimum allocation size of PCPU_MIN_ALLOC_SIZE,
1603 : : * therefore alignment must be a minimum of that many bytes.
1604 : : * An allocation may have internal fragmentation from rounding up
1605 : : * of up to PCPU_MIN_ALLOC_SIZE - 1 bytes.
1606 : : */
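 : : /*
 : :  * Worked example (assuming PCPU_MIN_ALLOC_SIZE == 4): a request of
 : :  * size 10 with align 1 becomes align = 4, size = ALIGN(10, 4) = 12,
 : :  * bits = 3 and bit_align = 1, i.e. three 4-byte fragments with no
 : :  * extra alignment constraint in the bitmap.
 : :  */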
1607 [ + + ]: 438883 : if (unlikely(align < PCPU_MIN_ALLOC_SIZE))
1608 : : align = PCPU_MIN_ALLOC_SIZE;
1609 : :
1610 : 438883 : size = ALIGN(size, PCPU_MIN_ALLOC_SIZE);
1611 : 438883 : bits = size >> PCPU_MIN_ALLOC_SHIFT;
1612 : 438883 : bit_align = align >> PCPU_MIN_ALLOC_SHIFT;
1613 : :
1614 [ + - + + : 877764 : if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE ||
+ + - + ]
1615 : : !is_power_of_2(align))) {
1616 [ - + ]: 2 : WARN(do_warn, "illegal size (%zu) or align (%zu) for percpu allocation\n",
1617 : : size, align);
1618 : : return NULL;
1619 : : }
1620 : :
1621 [ + + ]: 438881 : if (!is_atomic) {
1622 : : /*
1623 : : * pcpu_balance_workfn() allocates memory under this mutex,
1624 : : * and it may wait for memory reclaim. Allow current task
1625 : : * to become OOM victim, in case of memory pressure.
1626 : : */
1627 [ - + ]: 438425 : if (gfp & __GFP_NOFAIL)
1628 : 0 : mutex_lock(&pcpu_alloc_mutex);
1629 [ + - ]: 438425 : else if (mutex_lock_killable(&pcpu_alloc_mutex))
1630 : : return NULL;
1631 : : }
1632 : :
1633 : 438882 : spin_lock_irqsave(&pcpu_lock, flags);
1634 : :
1635 : : /* serve reserved allocations from the reserved chunk if available */
1636 [ + + - + ]: 438884 : if (reserved && pcpu_reserved_chunk) {
1637 : : chunk = pcpu_reserved_chunk;
1638 : :
1639 : 414 : off = pcpu_find_block_fit(chunk, bits, bit_align, is_atomic);
1640 [ + - ]: 414 : if (off < 0) {
1641 : : err = "alloc from reserved chunk failed";
1642 : : goto fail_unlock;
1643 : : }
1644 : :
1645 : 414 : off = pcpu_alloc_area(chunk, bits, bit_align, off);
1646 [ + - ]: 414 : if (off >= 0)
1647 : : goto area_found;
1648 : :
1649 : : err = "alloc from reserved chunk failed";
1650 : : goto fail_unlock;
1651 : : }
1652 : :
1653 : : restart:
1654 : : /* search through normal chunks */
1655 [ + - ]: 5760000 : for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
1656 [ + + ]: 5321530 : list_for_each_entry_safe(chunk, next, &pcpu_slot[slot], list) {
1657 : 438470 : off = pcpu_find_block_fit(chunk, bits, bit_align,
1658 : : is_atomic);
1659 [ - + ]: 438470 : if (off < 0) {
1660 [ # # ]: 0 : if (slot < PCPU_SLOT_FAIL_THRESHOLD)
1661 : : pcpu_chunk_move(chunk, 0);
1662 : 0 : continue;
1663 : : }
1664 : :
1665 : 438470 : off = pcpu_alloc_area(chunk, bits, bit_align, off);
1666 [ - + ]: 438470 : if (off >= 0)
1667 : : goto area_found;
1668 : :
1669 : : }
1670 : : }
1671 : :
1672 : : spin_unlock_irqrestore(&pcpu_lock, flags);
1673 : :
1674 : : /*
1675 : : * No space left. Create a new chunk. We don't want multiple
1676 : : * tasks to create chunks simultaneously. Serialize and create iff
1677 : : * there's still no empty chunk after grabbing the mutex.
1678 : : */
1679 [ # # ]: 0 : if (is_atomic) {
1680 : : err = "atomic alloc failed, no space left";
1681 : : goto fail;
1682 : : }
1683 : :
1684 [ # # ]: 0 : if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
1685 : 0 : chunk = pcpu_create_chunk(pcpu_gfp);
1686 [ # # ]: 0 : if (!chunk) {
1687 : : err = "failed to allocate new chunk";
1688 : : goto fail;
1689 : : }
1690 : :
1691 : 0 : spin_lock_irqsave(&pcpu_lock, flags);
1692 : 0 : pcpu_chunk_relocate(chunk, -1);
1693 : : } else {
1694 : 0 : spin_lock_irqsave(&pcpu_lock, flags);
1695 : : }
1696 : :
1697 : : goto restart;
1698 : :
1699 : : area_found:
1700 : : pcpu_stats_area_alloc(chunk, size);
1701 : : spin_unlock_irqrestore(&pcpu_lock, flags);
1702 : :
1703 : : /* populate if not all pages are already there */
1704 [ + + ]: 438884 : if (!is_atomic) {
1705 : : int page_start, page_end, rs, re;
1706 : :
1707 : 438426 : page_start = PFN_DOWN(off);
1708 : 438426 : page_end = PFN_UP(off + size);
1709 : :
1710 [ - + ]: 438426 : pcpu_for_each_unpop_region(chunk->populated, rs, re,
1711 : : page_start, page_end) {
1712 [ # # ]: 0 : WARN_ON(chunk->immutable);
1713 : :
1714 : 0 : ret = pcpu_populate_chunk(chunk, rs, re, pcpu_gfp);
1715 : :
1716 : 0 : spin_lock_irqsave(&pcpu_lock, flags);
1717 [ # # ]: 0 : if (ret) {
1718 : 0 : pcpu_free_area(chunk, off);
1719 : : err = "failed to populate";
1720 : 0 : goto fail_unlock;
1721 : : }
1722 : 0 : pcpu_chunk_populated(chunk, rs, re);
1723 : : spin_unlock_irqrestore(&pcpu_lock, flags);
1724 : : }
1725 : :
1726 : 438426 : mutex_unlock(&pcpu_alloc_mutex);
1727 : : }
1728 : :
1729 [ + + ]: 438884 : if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
1730 : 621 : pcpu_schedule_balance_work();
1731 : :
1732 : : /* clear the areas and return address relative to base address */
1733 [ + + ]: 2194420 : for_each_possible_cpu(cpu)
1734 : 1755536 : memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
1735 : :
1736 : 438884 : ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
1737 : : kmemleak_alloc_percpu(ptr, size, gfp);
1738 : :
1739 : 438884 : trace_percpu_alloc_percpu(reserved, is_atomic, size, align,
1740 : : chunk->base_addr, off, ptr);
1741 : :
1742 : 438884 : return ptr;
1743 : :
1744 : : fail_unlock:
1745 : : spin_unlock_irqrestore(&pcpu_lock, flags);
1746 : : fail:
1747 : 0 : trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align);
1748 : :
1749 [ # # # # ]: 0 : if (!is_atomic && do_warn && warn_limit) {
1750 : 0 : pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
1751 : : size, align, is_atomic, err);
1752 : 0 : dump_stack();
1753 [ # # ]: 0 : if (!--warn_limit)
1754 : 0 : pr_info("limit reached, disable warning\n");
1755 : : }
1756 [ # # ]: 0 : if (is_atomic) {
1757 : : /* see the flag handling in pcpu_balance_workfn() */
1758 : 0 : pcpu_atomic_alloc_failed = true;
1759 : 0 : pcpu_schedule_balance_work();
1760 : : } else {
1761 : 0 : mutex_unlock(&pcpu_alloc_mutex);
1762 : : }
1763 : : return NULL;
1764 : : }
1765 : :
1766 : : /**
1767 : : * __alloc_percpu_gfp - allocate dynamic percpu area
1768 : : * @size: size of area to allocate in bytes
1769 : : * @align: alignment of area (max PAGE_SIZE)
1770 : : * @gfp: allocation flags
1771 : : *
1772 : : * Allocate zero-filled percpu area of @size bytes aligned at @align. If
1773 : : * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
1774 : : * be called from any context but is a lot more likely to fail. If @gfp
1775 : : * has __GFP_NOWARN then no warning will be triggered on invalid or failed
1776 : : * allocation requests.
1777 : : *
1778 : : * RETURNS:
1779 : : * Percpu pointer to the allocated area on success, NULL on failure.
1780 : : */
1781 : 268787 : void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)
1782 : : {
1783 : 268787 : return pcpu_alloc(size, align, false, gfp);
1784 : : }
1785 : : EXPORT_SYMBOL_GPL(__alloc_percpu_gfp);
1786 : :
1787 : : /**
1788 : : * __alloc_percpu - allocate dynamic percpu area
1789 : : * @size: size of area to allocate in bytes
1790 : : * @align: alignment of area (max PAGE_SIZE)
1791 : : *
1792 : : * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL).
1793 : : */
1794 : 169682 : void __percpu *__alloc_percpu(size_t size, size_t align)
1795 : : {
1796 : 169682 : return pcpu_alloc(size, align, false, GFP_KERNEL);
1797 : : }
1798 : : EXPORT_SYMBOL_GPL(__alloc_percpu);
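 : :
 : : /*
 : :  * A minimal usage sketch (not part of this file; all names below are
 : :  * hypothetical): per-CPU hit counters summed on demand.
 : :  *
 : :  *	static unsigned long __percpu *hits;
 : :  *
 : :  *	static int hits_init(void)
 : :  *	{
 : :  *		hits = alloc_percpu(unsigned long);  // wraps __alloc_percpu()
 : :  *		return hits ? 0 : -ENOMEM;
 : :  *	}
 : :  *
 : :  *	static void hits_record(void)
 : :  *	{
 : :  *		this_cpu_inc(*hits);		// preemption-safe, lockless
 : :  *	}
 : :  *
 : :  *	static unsigned long hits_total(void)
 : :  *	{
 : :  *		unsigned long sum = 0;
 : :  *		int cpu;
 : :  *
 : :  *		for_each_possible_cpu(cpu)
 : :  *			sum += *per_cpu_ptr(hits, cpu);
 : :  *		return sum;
 : :  *	}
 : :  *
 : :  *	static void hits_exit(void)
 : :  *	{
 : :  *		free_percpu(hits);
 : :  *	}
 : :  */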
1799 : :
1800 : : /**
1801 : : * __alloc_reserved_percpu - allocate reserved percpu area
1802 : : * @size: size of area to allocate in bytes
1803 : : * @align: alignment of area (max PAGE_SIZE)
1804 : : *
1805 : : * Allocate zero-filled percpu area of @size bytes aligned at @align
1806 : : * from reserved percpu area if arch has set it up; otherwise,
1807 : : * allocation is served from the same dynamic area. Might sleep.
1808 : : * Might trigger writeouts.
1809 : : *
1810 : : * CONTEXT:
1811 : : * Does GFP_KERNEL allocation.
1812 : : *
1813 : : * RETURNS:
1814 : : * Percpu pointer to the allocated area on success, NULL on failure.
1815 : : */
1816 : 414 : void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
1817 : : {
1818 : 414 : return pcpu_alloc(size, align, true, GFP_KERNEL);
1819 : : }
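 : :
 : : /*
 : :  * The primary caller is the module loader, which allocates a module's
 : :  * .data..percpu section from the reserved region, roughly (sketch; see
 : :  * percpu_modalloc() in kernel/module.c for the real call site):
 : :  *
 : :  *	mod->percpu = __alloc_reserved_percpu(pcpusec->sh_size, align);
 : :  */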
1820 : :
1821 : : /**
1822 : : * pcpu_balance_workfn - manage the amount of free chunks and populated pages
1823 : : * @work: unused
1824 : : *
1825 : : * Reclaim all fully free chunks except for the first one. This is also
1826 : : * responsible for maintaining the pool of empty populated pages. However,
1827 : : * it is possible that this is called when physical memory is scarce,
1828 : : * causing the OOM killer to be triggered. We should avoid populating
1829 : : * until an actual allocation fails, as requests may still be serviceable
1830 : : * from regions that are already backed.
1831 : : */
1832 : 414 : static void pcpu_balance_workfn(struct work_struct *work)
1833 : : {
1834 : : /* gfp flags passed to underlying allocators */
1835 : : const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
1836 : 414 : LIST_HEAD(to_free);
1837 : 414 : struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1];
1838 : : struct pcpu_chunk *chunk, *next;
1839 : : int slot, nr_to_pop, ret;
1840 : :
1841 : : /*
1842 : : * There's no reason to keep around multiple unused chunks and VM
1843 : : * areas can be scarce. Destroy all free chunks except for one.
1844 : : */
1845 : 414 : mutex_lock(&pcpu_alloc_mutex);
1846 : : spin_lock_irq(&pcpu_lock);
1847 : :
1848 [ - + ]: 414 : list_for_each_entry_safe(chunk, next, free_head, list) {
1849 [ # # ]: 0 : WARN_ON(chunk->immutable);
1850 : :
1851 : : /* spare the first one */
1852 [ # # ]: 0 : if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
1853 : 0 : continue;
1854 : :
1855 : : list_move(&chunk->list, &to_free);
1856 : : }
1857 : :
1858 : : spin_unlock_irq(&pcpu_lock);
1859 : :
1860 [ - + ]: 414 : list_for_each_entry_safe(chunk, next, &to_free, list) {
1861 : : int rs, re;
1862 : :
1863 [ # # ]: 0 : pcpu_for_each_pop_region(chunk->populated, rs, re, 0,
1864 : : chunk->nr_pages) {
1865 : 0 : pcpu_depopulate_chunk(chunk, rs, re);
1866 : : spin_lock_irq(&pcpu_lock);
1867 : 0 : pcpu_chunk_depopulated(chunk, rs, re);
1868 : : spin_unlock_irq(&pcpu_lock);
1869 : : }
1870 : 0 : pcpu_destroy_chunk(chunk);
1871 : 0 : cond_resched();
1872 : : }
1873 : :
1874 : : /*
1875 : : * Ensure there are certain number of free populated pages for
1876 : : * atomic allocs. Fill up from the most packed so that atomic
1877 : : * allocs don't increase fragmentation. If atomic allocation
1878 : : * failed previously, always populate the maximum amount. This
1879 : : * should prevent atomic allocs larger than PAGE_SIZE from keeping
1880 : : * failing indefinitely; however, large atomic allocs are not
1881 : : * something we support properly and can be highly unreliable and
1882 : : * inefficient.
1883 : : */
1884 : : retry_pop:
1885 [ - + ]: 621 : if (pcpu_atomic_alloc_failed) {
1886 : : nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH;
1887 : : /* best effort anyway, don't worry about synchronization */
1888 : 0 : pcpu_atomic_alloc_failed = false;
1889 : : } else {
1890 : 621 : nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH -
1891 : : pcpu_nr_empty_pop_pages,
1892 : : 0, PCPU_EMPTY_POP_PAGES_HIGH);
1893 : : }
1894 : :
1895 [ + + ]: 4140 : for (slot = pcpu_size_to_slot(PAGE_SIZE); slot < pcpu_nr_slots; slot++) {
1896 : : int nr_unpop = 0, rs, re;
1897 : :
1898 [ + + ]: 3726 : if (!nr_to_pop)
1899 : : break;
1900 : :
1901 : : spin_lock_irq(&pcpu_lock);
1902 [ + + ]: 3933 : list_for_each_entry(chunk, &pcpu_slot[slot], list) {
1903 : 828 : nr_unpop = chunk->nr_pages - chunk->nr_populated;
1904 [ + + ]: 828 : if (nr_unpop)
1905 : : break;
1906 : : }
1907 : : spin_unlock_irq(&pcpu_lock);
1908 : :
1909 [ + + ]: 3519 : if (!nr_unpop)
1910 : 3105 : continue;
1911 : :
1912 : : /* @chunk can't go away while pcpu_alloc_mutex is held */
1913 [ + - ]: 414 : pcpu_for_each_unpop_region(chunk->populated, rs, re, 0,
1914 : : chunk->nr_pages) {
1915 : 414 : int nr = min(re - rs, nr_to_pop);
1916 : :
1917 : 414 : ret = pcpu_populate_chunk(chunk, rs, rs + nr, gfp);
1918 [ + - ]: 414 : if (!ret) {
1919 : 414 : nr_to_pop -= nr;
1920 : : spin_lock_irq(&pcpu_lock);
1921 : 414 : pcpu_chunk_populated(chunk, rs, rs + nr);
1922 : : spin_unlock_irq(&pcpu_lock);
1923 : : } else {
1924 : : nr_to_pop = 0;
1925 : : }
1926 : :
1927 [ - + ]: 414 : if (!nr_to_pop)
1928 : : break;
1929 : : }
1930 : : }
1931 : :
1932 [ + + ]: 621 : if (nr_to_pop) {
1933 : : /* ran out of chunks to populate, create a new one and retry */
1934 : 207 : chunk = pcpu_create_chunk(gfp);
1935 [ + - ]: 207 : if (chunk) {
1936 : : spin_lock_irq(&pcpu_lock);
1937 : 207 : pcpu_chunk_relocate(chunk, -1);
1938 : : spin_unlock_irq(&pcpu_lock);
1939 : : goto retry_pop;
1940 : : }
1941 : : }
1942 : :
1943 : 414 : mutex_unlock(&pcpu_alloc_mutex);
1944 : 414 : }
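 : :
 : : /*
 : :  * For reference, the work item and its trigger are defined earlier in
 : :  * this file, roughly as follows (sketch):
 : :  *
 : :  *	static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
 : :  *
 : :  *	static void pcpu_schedule_balance_work(void)
 : :  *	{
 : :  *		if (pcpu_async_enabled)
 : :  *			schedule_work(&pcpu_balance_work);
 : :  *	}
 : :  */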
1945 : :
1946 : : /**
1947 : : * free_percpu - free percpu area
1948 : : * @ptr: pointer to area to free
1949 : : *
1950 : : * Free percpu area @ptr.
1951 : : *
1952 : : * CONTEXT:
1953 : : * Can be called from atomic context.
1954 : : */
1955 : 80890 : void free_percpu(void __percpu *ptr)
1956 : : {
1957 : : void *addr;
1958 : : struct pcpu_chunk *chunk;
1959 : : unsigned long flags;
1960 : : int off;
1961 : : bool need_balance = false;
1962 : :
1963 [ + + ]: 80890 : if (!ptr)
1964 : 80898 : return;
1965 : :
1966 : : kmemleak_free_percpu(ptr);
1967 : :
1968 : 76438 : addr = __pcpu_ptr_to_addr(ptr);
1969 : :
1970 : 76438 : spin_lock_irqsave(&pcpu_lock, flags);
1971 : :
1972 : 76446 : chunk = pcpu_chunk_addr_search(addr);
1973 : 76446 : off = addr - chunk->base_addr;
1974 : :
1975 : 76446 : pcpu_free_area(chunk, off);
1976 : :
1977 : : /* if there are more than one fully free chunks, wake up grim reaper */
1978 [ - + ]: 76446 : if (chunk->free_bytes == pcpu_unit_size) {
1979 : : struct pcpu_chunk *pos;
1980 : :
1981 [ # # ]: 0 : list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
1982 [ # # ]: 0 : if (pos != chunk) {
1983 : : need_balance = true;
1984 : : break;
1985 : : }
1986 : : }
1987 : :
1988 : 76446 : trace_percpu_free_percpu(chunk->base_addr, off, ptr);
1989 : :
1990 : : spin_unlock_irqrestore(&pcpu_lock, flags);
1991 : :
1992 [ - + ]: 76446 : if (need_balance)
1993 : 0 : pcpu_schedule_balance_work();
1994 : : }
1995 : : EXPORT_SYMBOL_GPL(free_percpu);
1996 : :
1997 : 0 : bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr)
1998 : : {
1999 : : #ifdef CONFIG_SMP
2000 : 0 : const size_t static_size = __per_cpu_end - __per_cpu_start;
2001 : : void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
2002 : : unsigned int cpu;
2003 : :
2004 [ # # ]: 0 : for_each_possible_cpu(cpu) {
2005 : 0 : void *start = per_cpu_ptr(base, cpu);
2006 : 0 : void *va = (void *)addr;
2007 : :
2008 [ # # # # ]: 0 : if (va >= start && va < start + static_size) {
2009 [ # # ]: 0 : if (can_addr) {
2010 : 0 : *can_addr = (unsigned long) (va - start);
2011 : 0 : *can_addr += (unsigned long)
2012 : 0 : per_cpu_ptr(base, get_boot_cpu_id());
2013 : : }
2014 : : return true;
2015 : : }
2016 : : }
2017 : : #endif
2018 : : /* on UP, can't distinguish from other static vars, always false */
2019 : : return false;
2020 : : }
2021 : :
2022 : : /**
2023 : : * is_kernel_percpu_address - test whether address is from static percpu area
2024 : : * @addr: address to test
2025 : : *
2026 : : * Test whether @addr belongs to in-kernel static percpu area. Module
2027 : : * static percpu areas are not considered. For those, use
2028 : : * is_module_percpu_address().
2029 : : *
2030 : : * RETURNS:
2031 : : * %true if @addr is from in-kernel static percpu area, %false otherwise.
2032 : : */
2033 : 0 : bool is_kernel_percpu_address(unsigned long addr)
2034 : : {
2035 : 0 : return __is_kernel_percpu_address(addr, NULL);
2036 : : }
2037 : :
2038 : : /**
2039 : : * per_cpu_ptr_to_phys - convert translated percpu address to physical address
2040 : : * @addr: the address to be converted to physical address
2041 : : *
2042 : : * Given @addr which is a dereferenceable address obtained via one of the
2043 : : * percpu access macros, this function translates it into its physical
2044 : : * address. The caller is responsible for ensuring @addr stays valid
2045 : : * until this function finishes.
2046 : : *
2047 : : * The percpu allocator has a special setup for the first chunk, which
2048 : : * currently supports either embedding in the linear address space or a
2049 : : * vmalloc mapping; from the second chunk on, the backing allocator
2050 : : * (currently either vm or km) provides the translation.
2051 : : *
2052 : : * The address could be translated without checking whether it falls in
2053 : : * the first chunk, but the current code better reflects how the percpu
2054 : : * allocator actually works, and the verification can discover bugs both
2055 : : * in the percpu allocator itself and in per_cpu_ptr_to_phys() callers.
2056 : : * So we keep the current code.
2057 : : *
2058 : : * RETURNS:
2059 : : * The physical address for @addr.
2060 : : */
2061 : 0 : phys_addr_t per_cpu_ptr_to_phys(void *addr)
2062 : : {
2063 : : void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
2064 : : bool in_first_chunk = false;
2065 : : unsigned long first_low, first_high;
2066 : : unsigned int cpu;
2067 : :
2068 : : /*
2069 : : * The following test on unit_low/high isn't strictly
2070 : : * necessary but will speed up lookups of addresses which
2071 : : * aren't in the first chunk.
2072 : : *
2073 : : * The address check is against full chunk sizes. pcpu_base_addr
2074 : : * points to the beginning of the first chunk including the
2075 : : * static region. Assumes good intent as the first chunk may
2076 : : * not be full (ie. < pcpu_unit_pages in size).
2077 : : */
2078 : 0 : first_low = (unsigned long)pcpu_base_addr +
2079 : 0 : pcpu_unit_page_offset(pcpu_low_unit_cpu, 0);
2080 : 0 : first_high = (unsigned long)pcpu_base_addr +
2081 : 0 : pcpu_unit_page_offset(pcpu_high_unit_cpu, pcpu_unit_pages);
2082 [ # # ]: 0 : if ((unsigned long)addr >= first_low &&
2083 : 0 : (unsigned long)addr < first_high) {
2084 [ # # ]: 0 : for_each_possible_cpu(cpu) {
2085 : 0 : void *start = per_cpu_ptr(base, cpu);
2086 : :
2087 [ # # # # ]: 0 : if (addr >= start && addr < start + pcpu_unit_size) {
2088 : : in_first_chunk = true;
2089 : : break;
2090 : : }
2091 : : }
2092 : : }
2093 : :
2094 [ # # ]: 0 : if (in_first_chunk) {
2095 [ # # ]: 0 : if (!is_vmalloc_addr(addr))
2096 : 0 : return __pa(addr);
2097 : : else
2098 : 0 : return page_to_phys(vmalloc_to_page(addr)) +
2099 : 0 : offset_in_page(addr);
2100 : : } else
2101 : 0 : return page_to_phys(pcpu_addr_to_page(addr)) +
2102 : 0 : offset_in_page(addr);
2103 : : }
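 : :
 : : /*
 : :  * Hypothetical usage sketch: handing one CPU's slice of a percpu
 : :  * buffer to a device or firmware interface that needs a physical
 : :  * address (buf and cpu are assumed to exist):
 : :  *
 : :  *	phys_addr_t pa = per_cpu_ptr_to_phys(per_cpu_ptr(buf, cpu));
 : :  */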
2104 : :
2105 : : /**
2106 : : * pcpu_alloc_alloc_info - allocate percpu allocation info
2107 : : * @nr_groups: the number of groups
2108 : : * @nr_units: the number of units
2109 : : *
2110 : : * Allocate ai which is large enough for @nr_groups groups containing
2111 : : * @nr_units units. The returned ai's groups[0].cpu_map points to the
2112 : : * cpu_map array which is long enough for @nr_units and filled with
2113 : : * NR_CPUS. It's the caller's responsibility to initialize the cpu_map
2114 : : * pointers of the other groups.
2115 : : *
2116 : : * RETURNS:
2117 : : * Pointer to the allocated pcpu_alloc_info on success, NULL on
2118 : : * failure.
2119 : : */
2120 : 207 : struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
2121 : : int nr_units)
2122 : : {
2123 : : struct pcpu_alloc_info *ai;
2124 : : size_t base_size, ai_size;
2125 : : void *ptr;
2126 : : int unit;
2127 : :
2128 : 207 : base_size = ALIGN(struct_size(ai, groups, nr_groups),
2129 : : __alignof__(ai->groups[0].cpu_map[0]));
2130 : 207 : ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
2131 : :
2132 : 207 : ptr = memblock_alloc(PFN_ALIGN(ai_size), PAGE_SIZE);
2133 [ + - ]: 207 : if (!ptr)
2134 : : return NULL;
2135 : : ai = ptr;
2136 : 207 : ptr += base_size;
2137 : :
2138 : 207 : ai->groups[0].cpu_map = ptr;
2139 : :
2140 [ + + ]: 1035 : for (unit = 0; unit < nr_units; unit++)
2141 : 828 : ai->groups[0].cpu_map[unit] = NR_CPUS;
2142 : :
2143 : 207 : ai->nr_groups = nr_groups;
2144 : 207 : ai->__ai_size = PFN_ALIGN(ai_size);
2145 : :
2146 : 207 : return ai;
2147 : : }
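 : :
 : : /*
 : :  * Worked layout example (illustrative, assuming 64-bit and 4 units in
 : :  * one group): base_size rounds struct pcpu_alloc_info plus one
 : :  * pcpu_group_info up to the alignment of a cpu_map entry, then
 : :  * ai_size = base_size + 4 * sizeof(unsigned int) appends the shared
 : :  * cpu_map array. The whole block is page-aligned so that it can later
 : :  * be returned to memblock by pcpu_free_alloc_info().
 : :  */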
2148 : :
2149 : : /**
2150 : : * pcpu_free_alloc_info - free percpu allocation info
2151 : : * @ai: pcpu_alloc_info to free
2152 : : *
2153 : : * Free @ai which was allocated by pcpu_alloc_alloc_info().
2154 : : */
2155 : 207 : void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
2156 : : {
2157 : 414 : memblock_free_early(__pa(ai), ai->__ai_size);
2158 : 207 : }
2159 : :
2160 : : /**
2161 : : * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
2162 : : * @lvl: loglevel
2163 : : * @ai: allocation info to dump
2164 : : *
2165 : : * Print out information about @ai using loglevel @lvl.
2166 : : */
2167 : 207 : static void pcpu_dump_alloc_info(const char *lvl,
2168 : : const struct pcpu_alloc_info *ai)
2169 : : {
2170 : : int group_width = 1, cpu_width = 1, width;
2171 : 207 : char empty_str[] = "--------";
2172 : : int alloc = 0, alloc_end = 0;
2173 : : int group, v;
2174 : : int upa, apl; /* units per alloc, allocs per line */
2175 : :
2176 : 207 : v = ai->nr_groups;
2177 [ - + ]: 414 : while (v /= 10)
2178 : 0 : group_width++;
2179 : :
2180 : 207 : v = num_possible_cpus();
2181 [ - + ]: 414 : while (v /= 10)
2182 : 0 : cpu_width++;
2183 : 207 : empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
2184 : :
2185 : 207 : upa = ai->alloc_size / ai->unit_size;
2186 : 207 : width = upa * (cpu_width + 1) + group_width + 3;
2187 : 414 : apl = rounddown_pow_of_two(max(60 / width, 1));
2188 : :
2189 : 414 : printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
2190 : : lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
2191 : 207 : ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
2192 : :
2193 [ + + ]: 414 : for (group = 0; group < ai->nr_groups; group++) {
2194 : : const struct pcpu_group_info *gi = &ai->groups[group];
2195 : : int unit = 0, unit_end = 0;
2196 : :
2197 [ - + ]: 207 : BUG_ON(gi->nr_units % upa);
2198 [ + + ]: 1242 : for (alloc_end += gi->nr_units / upa;
2199 : 828 : alloc < alloc_end; alloc++) {
2200 [ + + ]: 828 : if (!(alloc % apl)) {
2201 : 207 : pr_cont("\n");
2202 : 207 : printk("%spcpu-alloc: ", lvl);
2203 : : }
2204 : 828 : pr_cont("[%0*d] ", group_width, group);
2205 : :
2206 [ + + ]: 1656 : for (unit_end += upa; unit < unit_end; unit++)
2207 [ + - ]: 828 : if (gi->cpu_map[unit] != NR_CPUS)
2208 : 828 : pr_cont("%0*d ",
2209 : : cpu_width, gi->cpu_map[unit]);
2210 : : else
2211 : 0 : pr_cont("%s ", empty_str);
2212 : : }
2213 : : }
2214 : 207 : pr_cont("\n");
2215 : 207 : }
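 : :
 : : /*
 : :  * On a small SMP machine the resulting boot log looks something like
 : :  * this (illustrative values):
 : :  *
 : :  *	pcpu-alloc: s182328 r8192 d28616 u524288 alloc=1*2097152
 : :  *	pcpu-alloc: [0] 0 1 2 3
 : :  *
 : :  * i.e. static/reserved/dynamic/unit sizes followed by, per allocation,
 : :  * the group index and the unit -> cpu mapping.
 : :  */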
2216 : :
2217 : : /**
2218 : : * pcpu_setup_first_chunk - initialize the first percpu chunk
2219 : : * @ai: pcpu_alloc_info describing how the percpu area is shaped
2220 : : * @base_addr: mapped address
2221 : : *
2222 : : * Initialize the first percpu chunk which contains the kernel static
2223 : : * percpu area. This function is to be called from the arch percpu area
2224 : : * setup path.
2225 : : *
2226 : : * @ai contains all information necessary to initialize the first
2227 : : * chunk and prime the dynamic percpu allocator.
2228 : : *
2229 : : * @ai->static_size is the size of static percpu area.
2230 : : *
2231 : : * @ai->reserved_size, if non-zero, specifies the amount of bytes to
2232 : : * reserve after the static area in the first chunk. This reserves
2233 : : * the first chunk such that it's available only through reserved
2234 : : * percpu allocation. This is primarily used to serve module percpu
2235 : : * static areas on architectures where the addressing model has
2236 : : * limited offset range for symbol relocations to guarantee module
2237 : : * percpu symbols fall inside the relocatable range.
2238 : : *
2239 : : * @ai->dyn_size determines the number of bytes available for dynamic
2240 : : * allocation in the first chunk. The area between @ai->static_size +
2241 : : * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
2242 : : *
2243 : : * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
2244 : : * and equal to or larger than @ai->static_size + @ai->reserved_size +
2245 : : * @ai->dyn_size.
2246 : : *
2247 : : * @ai->atom_size is the allocation atom size and used as alignment
2248 : : * for vm areas.
2249 : : *
2250 : : * @ai->alloc_size is the allocation size and always multiple of
2251 : : * @ai->atom_size. This is larger than @ai->atom_size if
2252 : : * @ai->unit_size is larger than @ai->atom_size.
2253 : : *
2254 : : * @ai->nr_groups and @ai->groups describe virtual memory layout of
2255 : : * percpu areas. Units which should be colocated are put into the
2256 : : * same group. Dynamic VM areas will be allocated according to these
2257 : : * groupings. If @ai->nr_groups is zero, a single group containing
2258 : : * all units is assumed.
2259 : : *
2260 : : * The caller should have mapped the first chunk at @base_addr and
2261 : : * copied static data to each unit.
2262 : : *
2263 : : * The first chunk will always contain a static and a dynamic region.
2264 : : * However, the static region is not managed by any chunk. If the first
2265 : : * chunk also contains a reserved region, it is served by two chunks -
2266 : : * one for the reserved region and one for the dynamic region. They
2267 : : * share the same vm, but use offset regions in the area allocation map.
2268 : : * The chunk serving the dynamic region is circulated in the chunk slots
2269 : : * and available for dynamic allocation like any other chunk.
2270 : : */
2271 : 207 : void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
2272 : : void *base_addr)
2273 : : {
2274 : 207 : size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
2275 : : size_t static_size, dyn_size;
2276 : : struct pcpu_chunk *chunk;
2277 : : unsigned long *group_offsets;
2278 : : size_t *group_sizes;
2279 : : unsigned long *unit_off;
2280 : : unsigned int cpu;
2281 : : int *unit_map;
2282 : : int group, unit, i;
2283 : : int map_size;
2284 : : unsigned long tmp_addr;
2285 : : size_t alloc_size;
2286 : :
2287 : : #define PCPU_SETUP_BUG_ON(cond) do { \
2288 : : if (unlikely(cond)) { \
2289 : : pr_emerg("failed to initialize, %s\n", #cond); \
2290 : : pr_emerg("cpu_possible_mask=%*pb\n", \
2291 : : cpumask_pr_args(cpu_possible_mask)); \
2292 : : pcpu_dump_alloc_info(KERN_EMERG, ai); \
2293 : : BUG(); \
2294 : : } \
2295 : : } while (0)
2296 : :
2297 : : /* sanity checks */
2298 [ - + ]: 207 : PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
2299 : : #ifdef CONFIG_SMP
2300 [ - + ]: 207 : PCPU_SETUP_BUG_ON(!ai->static_size);
2301 [ - + ]: 207 : PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start));
2302 : : #endif
2303 [ - + ]: 207 : PCPU_SETUP_BUG_ON(!base_addr);
2304 [ - + ]: 207 : PCPU_SETUP_BUG_ON(offset_in_page(base_addr));
2305 [ - + ]: 207 : PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
2306 [ - + ]: 207 : PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size));
2307 [ - + ]: 207 : PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
2308 [ - + ]: 207 : PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->unit_size, PCPU_BITMAP_BLOCK_SIZE));
2309 [ - + ]: 207 : PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
2310 [ - + ]: 207 : PCPU_SETUP_BUG_ON(!ai->dyn_size);
2311 [ - + ]: 207 : PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->reserved_size, PCPU_MIN_ALLOC_SIZE));
2312 : : PCPU_SETUP_BUG_ON(!(IS_ALIGNED(PCPU_BITMAP_BLOCK_SIZE, PAGE_SIZE) ||
2313 : : IS_ALIGNED(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE)));
2314 : : PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
2315 : :
2316 : : /* process group information and build config tables accordingly */
2317 : 207 : alloc_size = ai->nr_groups * sizeof(group_offsets[0]);
2318 : 207 : group_offsets = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2319 [ - + ]: 207 : if (!group_offsets)
2320 : 0 : panic("%s: Failed to allocate %zu bytes\n", __func__,
2321 : : alloc_size);
2322 : :
2323 : 207 : alloc_size = ai->nr_groups * sizeof(group_sizes[0]);
2324 : 207 : group_sizes = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2325 [ - + ]: 207 : if (!group_sizes)
2326 : 0 : panic("%s: Failed to allocate %zu bytes\n", __func__,
2327 : : alloc_size);
2328 : :
2329 : 207 : alloc_size = nr_cpu_ids * sizeof(unit_map[0]);
2330 : 207 : unit_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2331 [ - + ]: 207 : if (!unit_map)
2332 : 0 : panic("%s: Failed to allocate %zu bytes\n", __func__,
2333 : : alloc_size);
2334 : :
2335 : 207 : alloc_size = nr_cpu_ids * sizeof(unit_off[0]);
2336 : 207 : unit_off = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2337 [ + - ]: 207 : if (!unit_off)
2338 : 0 : panic("%s: Failed to allocate %zu bytes\n", __func__,
2339 : : alloc_size);
2340 : :
2341 [ + + ]: 828 : for (cpu = 0; cpu < nr_cpu_ids; cpu++)
2342 : 828 : unit_map[cpu] = UINT_MAX;
2343 : :
2344 : 207 : pcpu_low_unit_cpu = NR_CPUS;
2345 : 207 : pcpu_high_unit_cpu = NR_CPUS;
2346 : :
2347 [ + + ]: 414 : for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
2348 : : const struct pcpu_group_info *gi = &ai->groups[group];
2349 : :
2350 : 207 : group_offsets[group] = gi->base_offset;
2351 : 207 : group_sizes[group] = gi->nr_units * ai->unit_size;
2352 : :
2353 [ + + ]: 1035 : for (i = 0; i < gi->nr_units; i++) {
2354 : 828 : cpu = gi->cpu_map[i];
2355 [ - + ]: 828 : if (cpu == NR_CPUS)
2356 : 0 : continue;
2357 : :
2358 [ - + ]: 828 : PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids);
2359 [ - + ]: 1656 : PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
2360 [ - + ]: 828 : PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
2361 : :
2362 : 828 : unit_map[cpu] = unit + i;
2363 : 828 : unit_off[cpu] = gi->base_offset + i * ai->unit_size;
2364 : :
2365 : : /* determine low/high unit_cpu */
2366 [ + + - + ]: 1449 : if (pcpu_low_unit_cpu == NR_CPUS ||
2367 : 621 : unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
2368 : 207 : pcpu_low_unit_cpu = cpu;
2369 [ + + + - ]: 1449 : if (pcpu_high_unit_cpu == NR_CPUS ||
2370 : 621 : unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
2371 : 828 : pcpu_high_unit_cpu = cpu;
2372 : : }
2373 : : }
2374 : 207 : pcpu_nr_units = unit;
2375 : :
2376 [ + + ]: 1242 : for_each_possible_cpu(cpu)
2377 [ - + ]: 828 : PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
2378 : :
2379 : : /* we're done parsing the input, undefine BUG macro and dump config */
2380 : : #undef PCPU_SETUP_BUG_ON
2381 : 207 : pcpu_dump_alloc_info(KERN_DEBUG, ai);
2382 : :
2383 : 207 : pcpu_nr_groups = ai->nr_groups;
2384 : 207 : pcpu_group_offsets = group_offsets;
2385 : 207 : pcpu_group_sizes = group_sizes;
2386 : 207 : pcpu_unit_map = unit_map;
2387 : 207 : pcpu_unit_offsets = unit_off;
2388 : :
2389 : : /* determine basic parameters */
2390 : 207 : pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
2391 : 207 : pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
2392 : 207 : pcpu_atom_size = ai->atom_size;
2393 : 207 : pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
2394 : 207 : BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);
2395 : :
2396 : : pcpu_stats_save_ai(ai);
2397 : :
2398 : : /*
2399 : : * Allocate chunk slots. The additional last slot is for
2400 : : * empty chunks.
2401 : : */
2402 : 207 : pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
2403 : 207 : pcpu_slot = memblock_alloc(pcpu_nr_slots * sizeof(pcpu_slot[0]),
2404 : : SMP_CACHE_BYTES);
2405 [ + - ]: 207 : if (!pcpu_slot)
2406 : 0 : panic("%s: Failed to allocate %zu bytes\n", __func__,
2407 : : pcpu_nr_slots * sizeof(pcpu_slot[0]));
2408 [ + + ]: 3312 : for (i = 0; i < pcpu_nr_slots; i++)
2409 : 3312 : INIT_LIST_HEAD(&pcpu_slot[i]);
2410 : :
2411 : : /*
2412 : : * The end of the static region needs to be aligned with the
2413 : : * minimum allocation size as this offsets the reserved and
2414 : : * dynamic region. The first chunk ends page aligned by
2415 : : * expanding the dynamic region, therefore the dynamic region
2416 : : * can be shrunk to compensate while still staying above the
2417 : : * configured sizes.
2418 : : */
2419 : 207 : static_size = ALIGN(ai->static_size, PCPU_MIN_ALLOC_SIZE);
2420 : 207 : dyn_size = ai->dyn_size - (static_size - ai->static_size);
2421 : :
2422 : : /*
2423 : : * Initialize first chunk.
2424 : : * If the reserved_size is non-zero, this initializes the reserved
2425 : : * chunk. If the reserved_size is zero, the reserved chunk is NULL
2426 : : * and the dynamic region is initialized here. The first chunk,
2427 : : * pcpu_first_chunk, will always point to the chunk that serves
2428 : : * the dynamic region.
2429 : : */
2430 : 207 : tmp_addr = (unsigned long)base_addr + static_size;
2431 [ + - ]: 207 : map_size = ai->reserved_size ?: dyn_size;
2432 : 207 : chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
2433 : :
2434 : : /* init dynamic chunk if necessary */
2435 [ + - ]: 207 : if (ai->reserved_size) {
2436 : 207 : pcpu_reserved_chunk = chunk;
2437 : :
2438 : 207 : tmp_addr = (unsigned long)base_addr + static_size +
2439 : : ai->reserved_size;
2440 : 207 : map_size = dyn_size;
2441 : 207 : chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
2442 : : }
2443 : :
2444 : : /* link the first chunk in */
2445 : 207 : pcpu_first_chunk = chunk;
2446 : 207 : pcpu_nr_empty_pop_pages = pcpu_first_chunk->nr_empty_pop_pages;
2447 : 207 : pcpu_chunk_relocate(pcpu_first_chunk, -1);
2448 : :
2449 : : /* include all regions of the first chunk */
2450 : 207 : pcpu_nr_populated += PFN_DOWN(size_sum);
2451 : :
2452 : : pcpu_stats_chunk_alloc();
2453 : 207 : trace_percpu_create_chunk(base_addr);
2454 : :
2455 : : /* we're done */
2456 : 207 : pcpu_base_addr = base_addr;
2457 : 207 : }
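 : :
 : : /*
 : :  * A worked example of the first-chunk split (illustrative numbers):
 : :  * with static_size = 0x34000, reserved_size = 8K and dyn_size = 28K,
 : :  * the reserved chunk is created at base_addr + 0x34000 with
 : :  * map_size = 8K, and the dynamic chunk, which becomes pcpu_first_chunk,
 : :  * at base_addr + 0x34000 + 8K with map_size = 28K. With a zero
 : :  * reserved_size only the dynamic chunk is created.
 : :  */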
2458 : :
2459 : : #ifdef CONFIG_SMP
2460 : :
2461 : : const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = {
2462 : : [PCPU_FC_AUTO] = "auto",
2463 : : [PCPU_FC_EMBED] = "embed",
2464 : : [PCPU_FC_PAGE] = "page",
2465 : : };
2466 : :
2467 : : enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
2468 : :
2469 : 0 : static int __init percpu_alloc_setup(char *str)
2470 : : {
2471 [ # # ]: 0 : if (!str)
2472 : : return -EINVAL;
2473 : :
2474 : : if (0)
2475 : : /* nada */;
2476 : : #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
2477 : : else if (!strcmp(str, "embed"))
2478 : : pcpu_chosen_fc = PCPU_FC_EMBED;
2479 : : #endif
2480 : : #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
2481 : : else if (!strcmp(str, "page"))
2482 : : pcpu_chosen_fc = PCPU_FC_PAGE;
2483 : : #endif
2484 : : else
2485 : 0 : pr_warn("unknown allocator %s specified\n", str);
2486 : :
2487 : 0 : return 0;
2488 : : }
2489 : : early_param("percpu_alloc", percpu_alloc_setup);
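 : :
 : : /*
 : :  * Usage: the first-chunk allocator can be chosen on the kernel command
 : :  * line, e.g. "percpu_alloc=embed" or "percpu_alloc=page"; unrecognized
 : :  * values leave the default auto selection in place, with a warning.
 : :  */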
2490 : :
2491 : : /*
2492 : : * pcpu_embed_first_chunk() is used by the generic percpu setup.
2493 : : * Build it if the arch config needs it or if the generic setup is
2494 : : * going to be used.
2495 : : */
2496 : : #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
2497 : : !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
2498 : : #define BUILD_EMBED_FIRST_CHUNK
2499 : : #endif
2500 : :
2501 : : /* build pcpu_page_first_chunk() iff needed by the arch config */
2502 : : #if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
2503 : : #define BUILD_PAGE_FIRST_CHUNK
2504 : : #endif
2505 : :
2506 : : /* pcpu_build_alloc_info() is used by both embed and page first chunk */
2507 : : #if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
2508 : : /**
2509 : : * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
2510 : : * @reserved_size: the size of reserved percpu area in bytes
2511 : : * @dyn_size: minimum free size for dynamic allocation in bytes
2512 : : * @atom_size: allocation atom size
2513 : : * @cpu_distance_fn: callback to determine distance between cpus, optional
2514 : : *
2515 : : * This function determines grouping of units, their mappings to cpus
2516 : : * and other parameters considering needed percpu size, allocation
2517 : : * atom size and distances between CPUs.
2518 : : *
2519 : : * Groups are always multiples of atom size and CPUs which are of
2520 : : * LOCAL_DISTANCE both ways are grouped together and share space for
2521 : : * units in the same group. The returned configuration is guaranteed
2522 : : * to have CPUs on different nodes on different groups and >=75% usage
2523 : : * of allocated virtual address space.
2524 : : *
2525 : : * RETURNS:
2526 : : * On success, pointer to the new allocation_info is returned. On
2527 : : * failure, ERR_PTR value is returned.
2528 : : */
2529 : 207 : static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
2530 : : size_t reserved_size, size_t dyn_size,
2531 : : size_t atom_size,
2532 : : pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
2533 : : {
2534 : : static int group_map[NR_CPUS] __initdata;
2535 : : static int group_cnt[NR_CPUS] __initdata;
2536 : 207 : const size_t static_size = __per_cpu_end - __per_cpu_start;
2537 : : int nr_groups = 1, nr_units = 0;
2538 : : size_t size_sum, min_unit_size, alloc_size;
2539 : : int upa, max_upa, uninitialized_var(best_upa); /* units_per_alloc */
2540 : : int last_allocs, group, unit;
2541 : : unsigned int cpu, tcpu;
2542 : : struct pcpu_alloc_info *ai;
2543 : : unsigned int *cpu_map;
2544 : :
2545 : : /* this function may be called multiple times */
2546 : 207 : memset(group_map, 0, sizeof(group_map));
2547 : 207 : memset(group_cnt, 0, sizeof(group_cnt));
2548 : :
2549 : : /* calculate size_sum and ensure dyn_size is enough for early alloc */
2550 : 207 : size_sum = PFN_ALIGN(static_size + reserved_size +
2551 : : max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
2552 : 207 : dyn_size = size_sum - static_size - reserved_size;
2553 : :
2554 : : /*
2555 : : * Determine min_unit_size, alloc_size and max_upa such that
2556 : : * alloc_size is multiple of atom_size and is the smallest
2557 : : * which can accommodate 4k aligned segments which are equal to
2558 : : * or larger than min_unit_size.
2559 : : */
2560 : 207 : min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
2561 : :
2562 : : /* determine the maximum # of units that can fit in an allocation */
2563 : 207 : alloc_size = roundup(min_unit_size, atom_size);
2564 : 207 : upa = alloc_size / min_unit_size;
2565 [ - + - + ]: 414 : while (alloc_size % upa || (offset_in_page(alloc_size / upa)))
2566 : 0 : upa--;
2567 : 207 : max_upa = upa;
2568 : :
2569 : : /* group cpus according to their proximity */
2570 [ + + ]: 1242 : for_each_possible_cpu(cpu) {
2571 : : group = 0;
2572 : : next_group:
2573 [ + - ]: 2898 : for_each_possible_cpu(tcpu) {
2574 [ + + ]: 2070 : if (cpu == tcpu)
2575 : : break;
2576 [ + - - + : 1242 : if (group_map[tcpu] == group && cpu_distance_fn &&
# # ]
2577 [ # # ]: 0 : (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
2578 : 0 : cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
2579 : 0 : group++;
2580 : 0 : nr_groups = max(nr_groups, group + 1);
2581 : 0 : goto next_group;
2582 : : }
2583 : : }
2584 : 828 : group_map[cpu] = group;
2585 : 828 : group_cnt[group]++;
2586 : : }
2587 : :
2588 : : /*
2589 : : * Wasted space is caused by a ratio imbalance of upa to group_cnt.
2590 : : * Expand the unit_size until we use >= 75% of the units allocated.
2591 : : * Related to atom_size, which could be much larger than the unit_size.
2592 : : */
2593 : : last_allocs = INT_MAX;
2594 [ + + ]: 207 : for (upa = max_upa; upa; upa--) {
2595 : : int allocs = 0, wasted = 0;
2596 : :
2597 [ + - + - ]: 207 : if (alloc_size % upa || (offset_in_page(alloc_size / upa)))
2598 : 0 : continue;
2599 : :
2600 [ + + ]: 207 : for (group = 0; group < nr_groups; group++) {
2601 : 207 : int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
2602 : 207 : allocs += this_allocs;
2603 : 207 : wasted += this_allocs * upa - group_cnt[group];
2604 : : }
2605 : :
2606 : : /*
2607 : : * Don't accept if wastage is over 1/3. The
2608 : : * greater-than comparison ensures upa==1 always
2609 : : * passes the following check.
2610 : : */
2611 [ - + ]: 207 : if (wasted > num_possible_cpus() / 3)
2612 : 0 : continue;
2613 : :
2614 : : /* and then don't consume more memory */
2615 [ + - ]: 207 : if (allocs > last_allocs)
2616 : : break;
2617 : 207 : last_allocs = allocs;
2618 : : best_upa = upa;
2619 : : }
2620 : 207 : upa = best_upa;
2621 : :
2622 : : /* allocate and fill alloc_info */
2623 [ + + ]: 414 : for (group = 0; group < nr_groups; group++)
2624 : 207 : nr_units += roundup(group_cnt[group], upa);
2625 : :
2626 : 207 : ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
2627 [ + - ]: 207 : if (!ai)
2628 : : return ERR_PTR(-ENOMEM);
2629 : 207 : cpu_map = ai->groups[0].cpu_map;
2630 : :
2631 [ + + ]: 414 : for (group = 0; group < nr_groups; group++) {
2632 : 207 : ai->groups[group].cpu_map = cpu_map;
2633 : 207 : cpu_map += roundup(group_cnt[group], upa);
2634 : : }
2635 : :
2636 : 207 : ai->static_size = static_size;
2637 : 207 : ai->reserved_size = reserved_size;
2638 : 207 : ai->dyn_size = dyn_size;
2639 : 207 : ai->unit_size = alloc_size / upa;
2640 : 207 : ai->atom_size = atom_size;
2641 : 207 : ai->alloc_size = alloc_size;
2642 : :
2643 [ + + ]: 414 : for (group = 0, unit = 0; group < nr_groups; group++) {
2644 : : struct pcpu_group_info *gi = &ai->groups[group];
2645 : :
2646 : : /*
2647 : : * Initialize base_offset as if all groups are located
2648 : : * back-to-back. The caller should update this to
2649 : : * reflect actual allocation.
2650 : : */
2651 : 207 : gi->base_offset = unit * ai->unit_size;
2652 : :
2653 [ + + ]: 1242 : for_each_possible_cpu(cpu)
2654 [ + - ]: 828 : if (group_map[cpu] == group)
2655 : 828 : gi->cpu_map[gi->nr_units++] = cpu;
2656 : 207 : gi->nr_units = roundup(gi->nr_units, upa);
2657 : 207 : unit += gi->nr_units;
2658 : : }
2659 [ - + ]: 207 : BUG_ON(unit != nr_units);
2660 : :
2661 : : return ai;
2662 : : }
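 : :
 : : /*
 : :  * Worked example (illustrative, one NUMA node): with 4 possible CPUs,
 : :  * size_sum = 1M and atom_size = 2M, min_unit_size = 1M and
 : :  * alloc_size = roundup(1M, 2M) = 2M, so max_upa = 2. One group of
 : :  * 4 CPUs then needs two allocations of two units each with no waste,
 : :  * giving upa = 2, unit_size = 1M and nr_units = roundup(4, 2) = 4.
 : :  */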
2663 : : #endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */
2664 : :
2665 : : #if defined(BUILD_EMBED_FIRST_CHUNK)
2666 : : /**
2667 : : * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
2668 : : * @reserved_size: the size of reserved percpu area in bytes
2669 : : * @dyn_size: minimum free size for dynamic allocation in bytes
2670 : : * @atom_size: allocation atom size
2671 : : * @cpu_distance_fn: callback to determine distance between cpus, optional
2672 : : * @alloc_fn: function to allocate percpu page
2673 : : * @free_fn: function to free percpu page
2674 : : *
2675 : : * This is a helper to ease setting up an embedded first percpu chunk and
2676 : : * can be called where pcpu_setup_first_chunk() is expected.
2677 : : *
2678 : : * If this function is used to setup the first chunk, it is allocated
2679 : : * by calling @alloc_fn and used as-is without being mapped into
2680 : : * vmalloc area. Allocations are always whole multiples of @atom_size
2681 : : * aligned to @atom_size.
2682 : : *
2683 : : * This enables the first chunk to piggy back on the linear physical
2685 : : * mapping, which often uses larger page sizes. Please note that this
2685 : : * can result in very sparse cpu->unit mapping on NUMA machines thus
2686 : : * requiring large vmalloc address space. Don't use this allocator if
2687 : : * vmalloc space is not orders of magnitude larger than distances
2688 : : * between node memory addresses (ie. 32bit NUMA machines).
2689 : : *
2690 : : * @dyn_size specifies the minimum dynamic area size.
2691 : : *
2692 : : * If the needed size is smaller than the minimum or specified unit
2693 : : * size, the leftover is returned using @free_fn.
2694 : : *
2695 : : * RETURNS:
2696 : : * 0 on success, -errno on failure.
2697 : : */
2698 : 207 : int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
2699 : : size_t atom_size,
2700 : : pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
2701 : : pcpu_fc_alloc_fn_t alloc_fn,
2702 : : pcpu_fc_free_fn_t free_fn)
2703 : : {
2704 : : void *base = (void *)ULONG_MAX;
2705 : : void **areas = NULL;
2706 : : struct pcpu_alloc_info *ai;
2707 : : size_t size_sum, areas_size;
2708 : : unsigned long max_distance;
2709 : : int group, i, highest_group, rc = 0;
2710 : :
2711 : 207 : ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
2712 : : cpu_distance_fn);
2713 [ - + ]: 207 : if (IS_ERR(ai))
2714 : 0 : return PTR_ERR(ai);
2715 : :
2716 : 207 : size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
2717 : 207 : areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
2718 : :
2719 : 207 : areas = memblock_alloc(areas_size, SMP_CACHE_BYTES);
2720 [ + - ]: 207 : if (!areas) {
2721 : : rc = -ENOMEM;
2722 : : goto out_free;
2723 : : }
2724 : :
2725 : : /* allocate, copy and determine base address & max_distance */
2726 : : highest_group = 0;
2727 [ + + ]: 207 : for (group = 0; group < ai->nr_groups; group++) {
2728 : : struct pcpu_group_info *gi = &ai->groups[group];
2729 : : unsigned int cpu = NR_CPUS;
2730 : : void *ptr;
2731 : :
2732 [ + - + + ]: 207 : for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
2733 : 207 : cpu = gi->cpu_map[i];
2734 [ - + ]: 207 : BUG_ON(cpu == NR_CPUS);
2735 : :
2736 : : /* allocate space for the whole group */
2737 : 207 : ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
2738 [ - + ]: 207 : if (!ptr) {
2739 : : rc = -ENOMEM;
2740 : : goto out_free_areas;
2741 : : }
2742 : : /* kmemleak tracks the percpu allocations separately */
2743 : : kmemleak_free(ptr);
2744 : 207 : areas[group] = ptr;
2745 : :
2746 : 207 : base = min(ptr, base);
2747 [ - + ]: 207 : if (ptr > areas[highest_group])
2748 : : highest_group = group;
2749 : : }
2750 : 207 : max_distance = areas[highest_group] - base;
2751 : 207 : max_distance += ai->unit_size * ai->groups[highest_group].nr_units;
2752 : :
2753 : : /* warn if maximum distance is further than 75% of vmalloc space */
2754 [ - + ]: 207 : if (max_distance > VMALLOC_TOTAL * 3 / 4) {
2755 : 0 : pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n",
2756 : : max_distance, VMALLOC_TOTAL);
2757 : : #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
2758 : : /* and fail if we have fallback */
2759 : : rc = -EINVAL;
2760 : : goto out_free_areas;
2761 : : #endif
2762 : : }
2763 : :
2764 : : /*
2765 : : * Copy data and free unused parts. This should happen after all
2766 : : * allocations are complete; otherwise, we may end up with
2767 : : * overlapping groups.
2768 : : */
2769 [ + + ]: 207 : for (group = 0; group < ai->nr_groups; group++) {
2770 : : struct pcpu_group_info *gi = &ai->groups[group];
2771 : 207 : void *ptr = areas[group];
2772 : :
2773 [ + + ]: 1035 : for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
2774 [ - + ]: 828 : if (gi->cpu_map[i] == NR_CPUS) {
2775 : : /* unused unit, free whole */
2776 : 0 : free_fn(ptr, ai->unit_size);
2777 : 0 : continue;
2778 : : }
2779 : : /* copy and return the unused part */
2780 : 828 : memcpy(ptr, __per_cpu_load, ai->static_size);
2781 : 828 : free_fn(ptr + size_sum, ai->unit_size - size_sum);
2782 : : }
2783 : : }
2784 : :
2785 : : /* base address is now known, determine group base offsets */
2786 [ + + ]: 207 : for (group = 0; group < ai->nr_groups; group++) {
2787 : 207 : ai->groups[group].base_offset = areas[group] - base;
2788 : : }
2789 : :
2790 : 207 : pr_info("Embedded %zu pages/cpu s%zu r%zu d%zu u%zu\n",
2791 : : PFN_DOWN(size_sum), ai->static_size, ai->reserved_size,
2792 : : ai->dyn_size, ai->unit_size);
2793 : :
2794 : 207 : pcpu_setup_first_chunk(ai, base);
2795 : 207 : goto out_free;
2796 : :
2797 : : out_free_areas:
2798 [ # # ]: 0 : for (group = 0; group < ai->nr_groups; group++)
2799 [ # # ]: 0 : if (areas[group])
2800 : 0 : free_fn(areas[group],
2801 : 0 : ai->groups[group].nr_units * ai->unit_size);
2802 : : out_free:
2803 : 207 : pcpu_free_alloc_info(ai);
2804 [ + - ]: 207 : if (areas)
2805 : 207 : memblock_free_early(__pa(areas), areas_size);
2806 : 207 : return rc;
2807 : : }
2808 : : #endif /* BUILD_EMBED_FIRST_CHUNK */
2809 : :
2810 : : #ifdef BUILD_PAGE_FIRST_CHUNK
2811 : : /**
2812 : : * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
2813 : : * @reserved_size: the size of reserved percpu area in bytes
2814 : : * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
2815 : : * @free_fn: function to free percpu page, always called with PAGE_SIZE
2816 : : * @populate_pte_fn: function to populate pte
2817 : : *
2818 : : * This is a helper to ease setting up a page-remapped first percpu
2819 : : * chunk and can be called where pcpu_setup_first_chunk() is expected.
2820 : : *
2821 : : * This is the basic allocator. The static percpu area is allocated
2822 : : * page-by-page into the vmalloc area.
2823 : : *
2824 : : * RETURNS:
2825 : : * 0 on success, -errno on failure.
2826 : : */
2827 : : int __init pcpu_page_first_chunk(size_t reserved_size,
2828 : : pcpu_fc_alloc_fn_t alloc_fn,
2829 : : pcpu_fc_free_fn_t free_fn,
2830 : : pcpu_fc_populate_pte_fn_t populate_pte_fn)
2831 : : {
2832 : : static struct vm_struct vm;
2833 : : struct pcpu_alloc_info *ai;
2834 : : char psize_str[16];
2835 : : int unit_pages;
2836 : : size_t pages_size;
2837 : : struct page **pages;
2838 : : int unit, i, j, rc = 0;
2839 : : int upa;
2840 : : int nr_g0_units;
2841 : :
2842 : : snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
2843 : :
2844 : : ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
2845 : : if (IS_ERR(ai))
2846 : : return PTR_ERR(ai);
2847 : : BUG_ON(ai->nr_groups != 1);
2848 : : upa = ai->alloc_size / ai->unit_size;
2849 : : nr_g0_units = roundup(num_possible_cpus(), upa);
2850 : : if (WARN_ON(ai->groups[0].nr_units != nr_g0_units)) {
2851 : : pcpu_free_alloc_info(ai);
2852 : : return -EINVAL;
2853 : : }
2854 : :
2855 : : unit_pages = ai->unit_size >> PAGE_SHIFT;
2856 : :
2857 : : /* unaligned allocations can't be freed, round up to page size */
2858 : : pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
2859 : : sizeof(pages[0]));
2860 : : pages = memblock_alloc(pages_size, SMP_CACHE_BYTES);
2861 : : if (!pages)
2862 : : panic("%s: Failed to allocate %zu bytes\n", __func__,
2863 : : pages_size);
2864 : :
2865 : : /* allocate pages */
2866 : : j = 0;
2867 : : for (unit = 0; unit < num_possible_cpus(); unit++) {
2868 : : unsigned int cpu = ai->groups[0].cpu_map[unit];
2869 : : for (i = 0; i < unit_pages; i++) {
2870 : : void *ptr;
2871 : :
2872 : : ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
2873 : : if (!ptr) {
2874 : : pr_warn("failed to allocate %s page for cpu%u\n",
2875 : : psize_str, cpu);
2876 : : goto enomem;
2877 : : }
2878 : : /* kmemleak tracks the percpu allocations separately */
2879 : : kmemleak_free(ptr);
2880 : : pages[j++] = virt_to_page(ptr);
2881 : : }
2882 : : }
2883 : :
2884 : : /* allocate vm area, map the pages and copy static data */
2885 : : vm.flags = VM_ALLOC;
2886 : : vm.size = num_possible_cpus() * ai->unit_size;
2887 : : vm_area_register_early(&vm, PAGE_SIZE);
2888 : :
2889 : : for (unit = 0; unit < num_possible_cpus(); unit++) {
2890 : : unsigned long unit_addr =
2891 : : (unsigned long)vm.addr + unit * ai->unit_size;
2892 : :
2893 : : for (i = 0; i < unit_pages; i++)
2894 : : populate_pte_fn(unit_addr + (i << PAGE_SHIFT));
2895 : :
2896 : : /* pte already populated, the following shouldn't fail */
2897 : : rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
2898 : : unit_pages);
2899 : : if (rc < 0)
2900 : : panic("failed to map percpu area, err=%d\n", rc);
2901 : :
2902 : : /*
2903 : : * FIXME: Archs with virtual cache should flush local
2904 : : * cache for the linear mapping here - something
2905 : : * equivalent to flush_cache_vmap() on the local cpu.
2906 : : * flush_cache_vmap() can't be used as most supporting
2907 : : * data structures are not set up yet.
2908 : : */
2909 : :
2910 : : /* copy static data */
2911 : : memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
2912 : : }
2913 : :
2914 : : /* we're ready, commit */
2915 : : pr_info("%d %s pages/cpu s%zu r%zu d%zu\n",
2916 : : unit_pages, psize_str, ai->static_size,
2917 : : ai->reserved_size, ai->dyn_size);
2918 : :
2919 : : pcpu_setup_first_chunk(ai, vm.addr);
2920 : : goto out_free_ar;
2921 : :
2922 : : enomem:
2923 : : while (--j >= 0)
2924 : : free_fn(page_address(pages[j]), PAGE_SIZE);
2925 : : rc = -ENOMEM;
2926 : : out_free_ar:
2927 : : memblock_free_early(__pa(pages), pages_size);
2928 : : pcpu_free_alloc_info(ai);
2929 : : return rc;
2930 : : }
2931 : : #endif /* BUILD_PAGE_FIRST_CHUNK */
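The kerneldoc above fixes the callback contract: the alloc and free callbacks are always invoked with PAGE_SIZE, and the populate callback must make page tables cover each unit address before the pages are mapped. A minimal hedged sketch of an arch-side caller follows; every sketch_* name is invented for illustration, and real arch implementations differ, especially in the pte-population step:

	static void * __init sketch_fc_alloc(unsigned int cpu, size_t size,
					     size_t align)
	{
		/* per the contract above, size == align == PAGE_SIZE here */
		return memblock_alloc_from(size, align, __pa(MAX_DMA_ADDRESS));
	}

	static void __init sketch_fc_free(void *ptr, size_t size)
	{
		memblock_free_early(__pa(ptr), size);
	}

	static void __init sketch_populate_pte(unsigned long addr)
	{
		/* arch-specific: ensure pgd/p4d/pud/pmd/pte exist for addr */
	}

	void __init sketch_setup_per_cpu_areas(void)
	{
		int rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
					       sketch_fc_alloc, sketch_fc_free,
					       sketch_populate_pte);
		if (rc < 0)
			panic("cannot initialize percpu area (err=%d)", rc);
	}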
2932 : :
2933 : : #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
2934 : : /*
2935 : : * Generic SMP percpu area setup.
2936 : : *
2937 : : * The embedding helper is used because its behavior closely resembles
2938 : : * the original non-dynamic generic percpu area setup. This is
2939 : : * important because many archs have addressing restrictions and might
2940 : : * fail if the percpu area is located far away from the previous
2941 : : * location. As an added bonus, in non-NUMA cases, embedding is
2942 : : * generally a good idea TLB-wise because the percpu area can piggyback
2943 : : * on the physical linear memory mapping which uses large page
2944 : : * mappings on applicable archs.
2945 : : */
2946 : : unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
2947 : : EXPORT_SYMBOL(__per_cpu_offset);
2948 : :
2949 : 207 : static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
2950 : : size_t align)
2951 : : {
2952 : 207 : return memblock_alloc_from(size, align, __pa(MAX_DMA_ADDRESS));
2953 : : }
2954 : :
2955 : 828 : static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
2956 : : {
2957 : 828 : memblock_free_early(__pa(ptr), size);
2958 : 828 : }
2959 : :
2960 : 207 : void __init setup_per_cpu_areas(void)
2961 : : {
2962 : : unsigned long delta;
2963 : : unsigned int cpu;
2964 : : int rc;
2965 : :
2966 : : /*
2967 : : * Always reserve an area for module percpu variables. That's
2968 : : * what the legacy allocator did.
2969 : : */
2970 : 207 : rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
2971 : : PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
2972 : : pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
2973 [ - + ]: 207 : if (rc < 0)
2974 : 0 : panic("Failed to initialize percpu areas.");
2975 : :
2976 : 207 : delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
2977 [ + + ]: 1242 : for_each_possible_cpu(cpu)
2978 : 828 : __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
2979 : 207 : }
2980 : : #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
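Once __per_cpu_offset[] has been filled in as above, translating a static percpu symbol to a given cpu's copy is plain pointer arithmetic. A hedged illustration; per_cpu_ptr() is the real interface, and the open-coded addition in the comment is a simplified version of its usual SMP expansion:

	DEFINE_PER_CPU(unsigned long, sketch_counter);	/* invented symbol */

	static void sketch_walk_copies(void)
	{
		unsigned int cpu;

		for_each_possible_cpu(cpu) {
			/* roughly: &sketch_counter + __per_cpu_offset[cpu] */
			unsigned long *p = per_cpu_ptr(&sketch_counter, cpu);

			pr_info("cpu%u copy of sketch_counter at %px\n", cpu, p);
		}
	}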
2981 : :
2982 : : #else /* CONFIG_SMP */
2983 : :
2984 : : /*
2985 : : * UP percpu area setup.
2986 : : *
2987 : : * UP always uses km-based percpu allocator with identity mapping.
2988 : : * Static percpu variables are indistinguishable from the usual static
2989 : : * variables and don't require any special preparation.
2990 : : */
2991 : : void __init setup_per_cpu_areas(void)
2992 : : {
2993 : : const size_t unit_size =
2994 : : roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
2995 : : PERCPU_DYNAMIC_RESERVE));
2996 : : struct pcpu_alloc_info *ai;
2997 : : void *fc;
2998 : :
2999 : : ai = pcpu_alloc_alloc_info(1, 1);
3000 : : fc = memblock_alloc_from(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
3001 : : if (!ai || !fc)
3002 : : panic("Failed to allocate memory for percpu areas.");
3003 : : /* kmemleak tracks the percpu allocations separately */
3004 : : kmemleak_free(fc);
3005 : :
3006 : : ai->dyn_size = unit_size;
3007 : : ai->unit_size = unit_size;
3008 : : ai->atom_size = unit_size;
3009 : : ai->alloc_size = unit_size;
3010 : : ai->groups[0].nr_units = 1;
3011 : : ai->groups[0].cpu_map[0] = 0;
3012 : :
3013 : : pcpu_setup_first_chunk(ai, fc);
3014 : : pcpu_free_alloc_info(ai);
3015 : : }
3016 : :
3017 : : #endif /* CONFIG_SMP */
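The UP unit_size expression above can be worked by hand. A hedged arithmetic sketch using illustrative values (both constants are config-dependent):

	/*
	 * With illustrative values PCPU_MIN_UNIT_SIZE = 32KB and
	 * PERCPU_DYNAMIC_RESERVE = 28KB:
	 *   max(32KB, 28KB) = 32KB, roundup_pow_of_two(32KB) = 32KB,
	 * so the single UP unit is 32KB and all of it is dynamic space.
	 */
	static size_t __init sketch_up_unit_size(void)
	{
		size_t sz = max_t(size_t, PCPU_MIN_UNIT_SIZE,
				  PERCPU_DYNAMIC_RESERVE);

		return roundup_pow_of_two(sz);
	}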
3018 : :
3019 : : /*
3020 : : * pcpu_nr_pages - calculate total number of populated backing pages
3021 : : *
3022 : : * This reflects the number of pages populated to back chunks. Metadata is
3023 : : * excluded from the number exposed in meminfo because the number of backing
3024 : : * pages scales with the number of cpus and can quickly outweigh the memory
3025 : : * used for metadata. Excluding it also keeps this calculation nice and simple.
3026 : : *
3027 : : * RETURNS:
3028 : : * Total number of populated backing pages in use by the allocator.
3029 : : */
3030 : 207 : unsigned long pcpu_nr_pages(void)
3031 : : {
3032 : 207 : return pcpu_nr_populated * pcpu_nr_units;
3033 : : }
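This product is what the "Percpu:" line in /proc/meminfo reports, converted to kilobytes. A hedged numeric sketch with hypothetical counts:

	/*
	 * Illustration: 6 populated pages per unit across 4 units
	 * (4 possible cpus) gives 6 * 4 = 24 backing pages, i.e. 96KB
	 * with 4KB pages.
	 */
	static unsigned long sketch_percpu_kb(void)
	{
		return pcpu_nr_pages() << (PAGE_SHIFT - 10);
	}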
3034 : :
3035 : : /*
3036 : : * Percpu allocator is initialized early during boot, when neither slab nor
3037 : : * workqueue is available. Plug async management until everything is up
3038 : : * and running.
3039 : : */
3040 : 207 : static int __init percpu_enable_async(void)
3041 : : {
3042 : 207 : pcpu_async_enabled = true;
3043 : 207 : return 0;
3044 : : }
3045 : : subsys_initcall(percpu_enable_async);
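The same gate-behind-an-initcall pattern, shown in isolation as a hedged sketch; all sketch_* names are invented (in percpu itself the flag is pcpu_async_enabled and the deferred work is the balance work item):

	static bool sketch_async_ok __read_mostly;

	static void sketch_work_fn(struct work_struct *work)
	{
		/* the deferred housekeeping itself */
	}
	static DECLARE_WORK(sketch_work, sketch_work_fn);

	static void sketch_kick(void)
	{
		/* before workqueues exist, callers must do the work inline */
		if (sketch_async_ok)
			schedule_work(&sketch_work);
	}

	static int __init sketch_enable_async(void)
	{
		sketch_async_ok = true;
		return 0;
	}
	subsys_initcall(sketch_enable_async);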