TOMOYO Linux Cross Reference
Linux/mm/Kconfig


  1 # SPDX-License-Identifier: GPL-2.0-only
  2 
  3 menu "Memory Management options"
  4 
  5 #
  6 # For some reason microblaze and nios2 hard code SWAP=n.  Hopefully we can
  7 # add proper SWAP support to them, in which case this can be removed.
  8 #
  9 config ARCH_NO_SWAP
 10         bool
 11 
 12 config ZPOOL
 13         bool
 14 
 15 menuconfig SWAP
 16         bool "Support for paging of anonymous memory (swap)"
 17         depends on MMU && BLOCK && !ARCH_NO_SWAP
 18         default y
 19         help
 20           This option allows you to choose whether you want to have support
 21           for so-called swap devices or swap files in your kernel that are
 22           used to provide more virtual memory than the actual RAM present
 23           in your computer.  If unsure, say Y.
 24 
 25 config ZSWAP
 26         bool "Compressed cache for swap pages"
 27         depends on SWAP
 28         select CRYPTO
 29         select ZPOOL
 30         help
 31           A lightweight compressed cache for swap pages.  It takes
 32           pages that are in the process of being swapped out and attempts to
 33           compress them into a dynamically allocated RAM-based memory pool.
 34           This can result in a significant I/O reduction on the swap device and,
 35           in the case where decompressing from RAM is faster than swap device
 36           reads, can also improve workload performance.
 37 
 38 config ZSWAP_DEFAULT_ON
 39         bool "Enable the compressed cache for swap pages by default"
 40         depends on ZSWAP
 41         help
 42           If selected, the compressed cache for swap pages will be enabled
 43           at boot, otherwise it will be disabled.
 44 
 45           The selection made here can be overridden by using the kernel
 46           command line 'zswap.enabled=' option.
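
The same knob is also exposed at runtime: when zswap is built in, its
parameters appear under /sys/module/zswap/parameters/. A minimal, hedged C
sketch (assuming that path exists and the caller is root) that reads the
current state and then enables zswap, equivalent to booting with
zswap.enabled=1:

    /*
     * Hedged sketch: toggle zswap at runtime through its module parameter.
     * Assumes CONFIG_ZSWAP=y so /sys/module/zswap/parameters/enabled exists;
     * writing it requires root.
     */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            const char *path = "/sys/module/zswap/parameters/enabled";
            char cur[4] = "";
            int fd = open(path, O_RDWR);

            if (fd < 0) {
                    perror(path);
                    return 1;
            }
            if (read(fd, cur, sizeof(cur) - 1) > 0)
                    printf("zswap currently enabled: %c\n", cur[0]);
            /* Turn it on; same effect as booting with zswap.enabled=1. */
            if (pwrite(fd, "1", 1, 0) != 1)
                    perror("enable zswap");
            close(fd);
            return 0;
    }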
 47 
 48 config ZSWAP_SHRINKER_DEFAULT_ON
 49         bool "Shrink the zswap pool on memory pressure"
 50         depends on ZSWAP
 51         default n
 52         help
 53           If selected, the zswap shrinker will be enabled, and the pages
 54           stored in the zswap pool will become available for reclaim (i.e.
 55           written back to the backing swap device) on memory pressure.
 56 
 57           This means that zswap writeback could happen even if the pool is
 58           not yet full, or the cgroup zswap limit has not been reached,
 59           reducing the chance that cold pages will reside in the zswap pool
 60           and consume memory indefinitely.
 61 
 62 choice
 63         prompt "Default compressor"
 64         depends on ZSWAP
 65         default ZSWAP_COMPRESSOR_DEFAULT_LZO
 66         help
 67           Selects the default compression algorithm for the compressed cache
 68           for swap pages.
 69 
 70           For an overview of what kind of performance can be expected from
 71           a particular compression algorithm please refer to the benchmarks
 72           available at the following LWN page:
 73           https://lwn.net/Articles/751795/
 74 
 75           If in doubt, select 'LZO'.
 76 
 77           The selection made here can be overridden by using the kernel
 78           command line 'zswap.compressor=' option.
 79 
 80 config ZSWAP_COMPRESSOR_DEFAULT_DEFLATE
 81         bool "Deflate"
 82         select CRYPTO_DEFLATE
 83         help
 84           Use the Deflate algorithm as the default compression algorithm.
 85 
 86 config ZSWAP_COMPRESSOR_DEFAULT_LZO
 87         bool "LZO"
 88         select CRYPTO_LZO
 89         help
 90           Use the LZO algorithm as the default compression algorithm.
 91 
 92 config ZSWAP_COMPRESSOR_DEFAULT_842
 93         bool "842"
 94         select CRYPTO_842
 95         help
 96           Use the 842 algorithm as the default compression algorithm.
 97 
 98 config ZSWAP_COMPRESSOR_DEFAULT_LZ4
 99         bool "LZ4"
100         select CRYPTO_LZ4
101         help
102           Use the LZ4 algorithm as the default compression algorithm.
103 
104 config ZSWAP_COMPRESSOR_DEFAULT_LZ4HC
105         bool "LZ4HC"
106         select CRYPTO_LZ4HC
107         help
108           Use the LZ4HC algorithm as the default compression algorithm.
109 
110 config ZSWAP_COMPRESSOR_DEFAULT_ZSTD
111         bool "zstd"
112         select CRYPTO_ZSTD
113         help
114           Use the zstd algorithm as the default compression algorithm.
115 endchoice
116 
117 config ZSWAP_COMPRESSOR_DEFAULT
118        string
119        depends on ZSWAP
120        default "deflate" if ZSWAP_COMPRESSOR_DEFAULT_DEFLATE
121        default "lzo" if ZSWAP_COMPRESSOR_DEFAULT_LZO
122        default "842" if ZSWAP_COMPRESSOR_DEFAULT_842
123        default "lz4" if ZSWAP_COMPRESSOR_DEFAULT_LZ4
124        default "lz4hc" if ZSWAP_COMPRESSOR_DEFAULT_LZ4HC
125        default "zstd" if ZSWAP_COMPRESSOR_DEFAULT_ZSTD
126        default ""
127 
128 choice
129         prompt "Default allocator"
130         depends on ZSWAP
131         default ZSWAP_ZPOOL_DEFAULT_ZSMALLOC if HAVE_ZSMALLOC
132         default ZSWAP_ZPOOL_DEFAULT_ZBUD
133         help
134           Selects the default allocator for the compressed cache for
135           swap pages.
136           The default is 'zbud' for compatibility; however, please read the
137           description of each of the allocators below before making your
138           choice.
139 
140           The selection made here can be overridden by using the kernel
141           command line 'zswap.zpool=' option.
142 
143 config ZSWAP_ZPOOL_DEFAULT_ZBUD
144         bool "zbud"
145         select ZBUD
146         help
147           Use the zbud allocator as the default allocator.
148 
149 config ZSWAP_ZPOOL_DEFAULT_Z3FOLD_DEPRECATED
150         bool "z3fold (DEPRECATED)"
151         select Z3FOLD_DEPRECATED
152         help
153           Use the z3fold allocator as the default allocator.
154 
155           Deprecated and scheduled for removal in a few cycles,
156           see CONFIG_Z3FOLD_DEPRECATED.
157 
158 config ZSWAP_ZPOOL_DEFAULT_ZSMALLOC
159         bool "zsmalloc"
160         depends on HAVE_ZSMALLOC
161         select ZSMALLOC
162         help
163           Use the zsmalloc allocator as the default allocator.
164 endchoice
165 
166 config ZSWAP_ZPOOL_DEFAULT
167        string
168        depends on ZSWAP
169        default "zbud" if ZSWAP_ZPOOL_DEFAULT_ZBUD
170        default "z3fold" if ZSWAP_ZPOOL_DEFAULT_Z3FOLD_DEPRECATED
171        default "zsmalloc" if ZSWAP_ZPOOL_DEFAULT_ZSMALLOC
172        default ""
173 
174 config ZBUD
175         tristate "2:1 compression allocator (zbud)"
176         depends on ZSWAP
177         help
178           A special purpose allocator for storing compressed pages.
179           It is designed to store up to two compressed pages per physical
180           page.  While this design limits storage density, it has simple and
181           deterministic reclaim properties that make it preferable to a higher
182           density approach when reclaim will be used.
183 
184 config Z3FOLD_DEPRECATED
185         tristate "3:1 compression allocator (z3fold) (DEPRECATED)"
186         depends on ZSWAP
187         help
188           Deprecated and scheduled for removal in a few cycles. If you have
189           a good reason for using Z3FOLD over ZSMALLOC, please contact
190           linux-mm@kvack.org and the zswap maintainers.
191 
192           A special purpose allocator for storing compressed pages.
193           It is designed to store up to three compressed pages per physical
194           page. It is a ZBUD derivative so the simplicity and determinism are
195           still there.
196 
197 config Z3FOLD
198         tristate
199         default y if Z3FOLD_DEPRECATED=y
200         default m if Z3FOLD_DEPRECATED=m
201         depends on Z3FOLD_DEPRECATED
202 
203 config HAVE_ZSMALLOC
204         def_bool y
205         depends on MMU
206         depends on PAGE_SIZE_LESS_THAN_256KB # we want <= 64 KiB
207 
208 config ZSMALLOC
209         tristate
210         prompt "N:1 compression allocator (zsmalloc)" if ZSWAP
211         depends on HAVE_ZSMALLOC
212         help
213           zsmalloc is a slab-based memory allocator designed to store
214           compressed pages of various sizes efficiently. It achieves
215           the highest storage density with the least amount of fragmentation.
216 
217 config ZSMALLOC_STAT
218         bool "Export zsmalloc statistics"
219         depends on ZSMALLOC
220         select DEBUG_FS
221         help
222           This option enables code in zsmalloc to collect various
223           statistics about what's happening in zsmalloc and exports that
224           information to userspace via debugfs.
225           If unsure, say N.
226 
227 config ZSMALLOC_CHAIN_SIZE
228         int "Maximum number of physical pages per-zspage"
229         default 8
230         range 4 16
231         depends on ZSMALLOC
232         help
233           This option sets the upper limit on the number of physical pages
234           that a zsmalloc page (zspage) can consist of. The optimal zspage
235           chain size is calculated for each size class during the
236           initialization of the pool.
237 
238           Changing this option can alter the characteristics of size classes,
239           such as the number of pages per zspage and the number of objects
240           per zspage. This can also result in different configurations of
241           the pool, as zsmalloc merges size classes with similar
242           characteristics.
243 
244           For more information, see zsmalloc documentation.
245 
246 menu "Slab allocator options"
247 
248 config SLUB
249         def_bool y
250 
251 config SLUB_TINY
252         bool "Configure for minimal memory footprint"
253         depends on EXPERT
254         select SLAB_MERGE_DEFAULT
255         help
256            Configures the slab allocator in a way to achieve minimal memory
257            footprint, sacrificing scalability, debugging and other features.
258            This is intended only for the smallest systems that would have
259            used the SLOB allocator and is not recommended for systems with
260            more than 16MB RAM.
261 
262            If unsure, say N.
263 
264 config SLAB_MERGE_DEFAULT
265         bool "Allow slab caches to be merged"
266         default y
267         help
268           For reduced kernel memory fragmentation, slab caches can be
269           merged when they share the same size and other characteristics.
270           This carries a risk of kernel heap overflows being able to
271           overwrite objects from merged caches (and more easily control
272           cache layout), which makes such heap attacks easier to exploit
273           by attackers. By keeping caches unmerged, these kinds of exploits
274           can usually only damage objects in the same cache. To disable
275           merging at runtime, "slab_nomerge" can be passed on the kernel
276           command line.
277 
278 config SLAB_FREELIST_RANDOM
279         bool "Randomize slab freelist"
280         depends on !SLUB_TINY
281         help
282           Randomizes the freelist order used on creating new pages. This
283           security feature reduces the predictability of the kernel slab
284           allocator against heap overflows.
285 
286 config SLAB_FREELIST_HARDENED
287         bool "Harden slab freelist metadata"
288         depends on !SLUB_TINY
289         help
290           Many kernel heap attacks try to target slab cache metadata and
291           other infrastructure. This option makes minor performance
292           sacrifices to harden the kernel slab allocator against common
293           freelist exploit methods.
294 
295 config SLAB_BUCKETS
296         bool "Support allocation from separate kmalloc buckets"
297         depends on !SLUB_TINY
298         default SLAB_FREELIST_HARDENED
299         help
300           Kernel heap attacks frequently depend on being able to create
301           specifically-sized allocations with user-controlled contents
302           that will be allocated into the same kmalloc bucket as a
303           target object. To avoid sharing these allocation buckets,
304           provide an explicitly separated set of buckets to be used for
305           user-controlled allocations. This may very slightly increase
306           memory fragmentation, though in practice it's only a handful
307           of extra pages since the bulk of user-controlled allocations
308           are relatively long-lived.
309 
310           If unsure, say Y.
311 
312 config SLUB_STATS
313         default n
314         bool "Enable performance statistics"
315         depends on SYSFS && !SLUB_TINY
316         help
317           The statistics are useful to debug slab allocation behavior in
318           order to find ways to optimize the allocator. This should never be
319           enabled for production use since keeping statistics slows down
320           the allocator by a few percentage points. The slabinfo command
321           supports the determination of the most active slabs to figure
322           out which slabs are relevant to a particular load.
323           Try running: slabinfo -DA
324 
325 config SLUB_CPU_PARTIAL
326         default y
327         depends on SMP && !SLUB_TINY
328         bool "Enable per cpu partial caches"
329         help
330           Per cpu partial caches accelerate object allocation and freeing
331           that are local to a processor at the price of more indeterminism
332           in the latency of the free. On overflow these caches will be cleared
333           which requires the taking of locks that may cause latency spikes.
334           Typically one would choose no for a realtime system.
335 
336 config RANDOM_KMALLOC_CACHES
337         default n
338         depends on !SLUB_TINY
339         bool "Randomize slab caches for normal kmalloc"
340         help
341           A hardening feature that creates multiple copies of slab caches for
342           normal kmalloc allocation and makes kmalloc randomly pick one based
343           on code address, which makes it more difficult for attackers to spray
344           vulnerable memory objects on the heap for the purpose of exploiting
345           memory vulnerabilities.
346 
347           Currently the number of copies is set to 16, a reasonably large value
348           that effectively diverges the memory objects allocated for different
349           subsystems or modules into different caches, at the expense of a
350           limited degree of memory and CPU overhead that relates to hardware and
351           system workload.
352 
353 endmenu # Slab allocator options
354 
355 config SHUFFLE_PAGE_ALLOCATOR
356         bool "Page allocator randomization"
357         default SLAB_FREELIST_RANDOM && ACPI_NUMA
358         help
359           Randomization of the page allocator improves the average
360           utilization of a direct-mapped memory-side-cache. See section
361           5.2.27 Heterogeneous Memory Attribute Table (HMAT) in the ACPI
362           6.2a specification for an example of how a platform advertises
363           the presence of a memory-side-cache. There are also incidental
364           security benefits as it reduces the predictability of page
365           allocations to complement SLAB_FREELIST_RANDOM, but the
366           default granularity of shuffling on the MAX_PAGE_ORDER, i.e., 10th
367           order of pages is selected based on cache utilization benefits
368           on x86.
369 
370           While the randomization improves cache utilization it may
371           negatively impact workloads on platforms without a cache. For
372           this reason, by default, the randomization is not enabled even
373           if SHUFFLE_PAGE_ALLOCATOR=y. The randomization may be force enabled
374           with the 'page_alloc.shuffle' kernel command line parameter.
375 
376           Say Y if unsure.
377 
378 config COMPAT_BRK
379         bool "Disable heap randomization"
380         default y
381         help
382           Randomizing heap placement makes heap exploits harder, but it
383           also breaks ancient binaries (including anything libc5 based).
384           This option changes the bootup default to heap randomization
385           disabled, and can be overridden at runtime by setting
386           /proc/sys/kernel/randomize_va_space to 2.
387 
388           On non-ancient distros (post-2000 ones) N is usually a safe choice.
389 
390 config MMAP_ALLOW_UNINITIALIZED
391         bool "Allow mmapped anonymous memory to be uninitialized"
392         depends on EXPERT && !MMU
393         default n
394         help
395           Normally, and according to the Linux spec, anonymous memory obtained
396           from mmap() has its contents cleared before it is passed to
397           userspace.  Enabling this config option allows you to request that
398           mmap() skip that if it is given the MAP_UNINITIALIZED flag, thus
399           providing a huge performance boost.  If this option is not enabled,
400           then the flag will be ignored.
401 
402           This is taken advantage of by uClibc's malloc(), and also by
403           ELF-FDPIC binfmt's brk and stack allocator.
404 
405           Because of the obvious security issues, this option should only be
406           enabled on embedded devices where you control what is run in
407           userspace.  Since that isn't generally a problem on no-MMU systems,
408           it is normally safe to say Y here.
409 
410           See Documentation/admin-guide/mm/nommu-mmap.rst for more information.
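
A hedged sketch of the userspace side on a no-MMU system: the fallback flag
value below is copied from asm-generic/mman-common.h in case the libc
headers do not expose it, and the kernel simply ignores the flag when this
option is disabled (or on MMU kernels), so the mapping arrives zeroed as
usual:

    /*
     * Hedged sketch: request uninitialized anonymous memory on a no-MMU
     * kernel. When CONFIG_MMAP_ALLOW_UNINITIALIZED is not set the kernel
     * ignores the flag and the pages arrive zeroed as usual.
     */
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/mman.h>

    #ifndef MAP_UNINITIALIZED
    #define MAP_UNINITIALIZED 0x4000000     /* asm-generic/mman-common.h */
    #endif

    int main(void)
    {
            size_t len = 1 << 20;   /* 1 MiB */
            void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_UNINITIALIZED,
                           -1, 0);

            if (p == MAP_FAILED) {
                    perror("mmap");
                    return EXIT_FAILURE;
            }
            /*
             * Contents are undefined here if the option is enabled; the
             * caller must initialise whatever it intends to read.
             */
            munmap(p, len);
            return EXIT_SUCCESS;
    }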
411 
412 config SELECT_MEMORY_MODEL
413         def_bool y
414         depends on ARCH_SELECT_MEMORY_MODEL
415 
416 choice
417         prompt "Memory model"
418         depends on SELECT_MEMORY_MODEL
419         default SPARSEMEM_MANUAL if ARCH_SPARSEMEM_DEFAULT
420         default FLATMEM_MANUAL
421         help
422           This option allows you to change some of the ways that
423           Linux manages its memory internally. Most users will
424           only have one option here selected by the architecture
425           configuration. This is normal.
426 
427 config FLATMEM_MANUAL
428         bool "Flat Memory"
429         depends on !ARCH_SPARSEMEM_ENABLE || ARCH_FLATMEM_ENABLE
430         help
431           This option is best suited for non-NUMA systems with
432           flat address space. FLATMEM is the most efficient
433           memory model in terms of performance and resource consumption
434           and it is the best option for smaller systems.
435 
436           For systems that have holes in their physical address
437           spaces and for features like NUMA and memory hotplug,
438           choose "Sparse Memory".
439 
440           If unsure, choose this option (Flat Memory) over any other.
441 
442 config SPARSEMEM_MANUAL
443         bool "Sparse Memory"
444         depends on ARCH_SPARSEMEM_ENABLE
445         help
446           This will be the only option for some systems, including
447           memory hot-plug systems.  This is normal.
448 
449           This option provides efficient support for systems with
450           holes in their physical address space and allows memory
451           hot-plug and hot-remove.
452 
453           If unsure, choose "Flat Memory" over this option.
454 
455 endchoice
456 
457 config SPARSEMEM
458         def_bool y
459         depends on (!SELECT_MEMORY_MODEL && ARCH_SPARSEMEM_ENABLE) || SPARSEMEM_MANUAL
460 
461 config FLATMEM
462         def_bool y
463         depends on !SPARSEMEM || FLATMEM_MANUAL
464 
465 #
466 # SPARSEMEM_EXTREME (which is the default) does some bootmem
467 # allocations when sparse_init() is called.  If this cannot
468 # be done on your architecture, select this option.  However,
469 # statically allocating the mem_section[] array can potentially
470 # consume vast quantities of .bss, so be careful.
471 #
472 # This option will also potentially produce smaller runtime code
473 # with gcc 3.4 and later.
474 #
475 config SPARSEMEM_STATIC
476         bool
477 
478 #
479 # Architecture platforms which require a two level mem_section in SPARSEMEM
480 # must select this option. This is usually for architecture platforms with
481 # an extremely sparse physical address space.
482 #
483 config SPARSEMEM_EXTREME
484         def_bool y
485         depends on SPARSEMEM && !SPARSEMEM_STATIC
486 
487 config SPARSEMEM_VMEMMAP_ENABLE
488         bool
489 
490 config SPARSEMEM_VMEMMAP
491         bool "Sparse Memory virtual memmap"
492         depends on SPARSEMEM && SPARSEMEM_VMEMMAP_ENABLE
493         default y
494         help
495           SPARSEMEM_VMEMMAP uses a virtually mapped memmap to optimise
496           pfn_to_page and page_to_pfn operations.  This is the most
497           efficient option when sufficient kernel resources are available.
498 #
499 # Select this config option from the architecture Kconfig, if it is preferred
500 # to enable the feature of HugeTLB/dev_dax vmemmap optimization.
501 #
502 config ARCH_WANT_OPTIMIZE_DAX_VMEMMAP
503         bool
504 
505 config ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
506         bool
507 
508 config HAVE_MEMBLOCK_PHYS_MAP
509         bool
510 
511 config HAVE_GUP_FAST
512         depends on MMU
513         bool
514 
515 # Don't discard allocated memory used to track "memory" and "reserved" memblocks
516 # after early boot, so it can still be used to test for validity of memory.
517 # Also, memblocks are updated with memory hot(un)plug.
518 config ARCH_KEEP_MEMBLOCK
519         bool
520 
521 # Keep arch NUMA mapping infrastructure post-init.
522 config NUMA_KEEP_MEMINFO
523         bool
524 
525 config MEMORY_ISOLATION
526         bool
527 
528 # IORESOURCE_SYSTEM_RAM regions in the kernel resource tree that are marked
529 # IORESOURCE_EXCLUSIVE cannot be mapped to user space, for example, via
530 # /dev/mem.
531 config EXCLUSIVE_SYSTEM_RAM
532         def_bool y
533         depends on !DEVMEM || STRICT_DEVMEM
534 
535 #
536 # Only to be set on architectures that have completely implemented the memory
537 # hotplug feature. If you are not sure, don't touch it.
538 #
539 config HAVE_BOOTMEM_INFO_NODE
540         def_bool n
541 
542 config ARCH_ENABLE_MEMORY_HOTPLUG
543         bool
544 
545 config ARCH_ENABLE_MEMORY_HOTREMOVE
546         bool
547 
548 # eventually, we can have this option just 'select SPARSEMEM'
549 menuconfig MEMORY_HOTPLUG
550         bool "Memory hotplug"
551         select MEMORY_ISOLATION
552         depends on SPARSEMEM
553         depends on ARCH_ENABLE_MEMORY_HOTPLUG
554         depends on 64BIT
555         select NUMA_KEEP_MEMINFO if NUMA
556 
557 if MEMORY_HOTPLUG
558 
559 config MEMORY_HOTPLUG_DEFAULT_ONLINE
560         bool "Online the newly added memory blocks by default"
561         depends on MEMORY_HOTPLUG
562         help
563           This option sets the default value of the memory hotplug onlining
564           policy (/sys/devices/system/memory/auto_online_blocks), which
565           determines what happens to newly added memory regions. The policy
566           can always be changed at runtime.
567           See Documentation/admin-guide/mm/memory-hotplug.rst for more information.
568 
569           Say Y here if you want all hot-plugged memory blocks to appear in
570           'online' state by default.
571           Say N here if you want the default policy to keep all hot-plugged
572           memory blocks in 'offline' state.
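
The policy file named above accepts plain strings such as "offline" and
"online". A small, hedged C sketch that switches the runtime policy to
onlining (needs root; the default built in by this option still applies at
the next boot):

    /*
     * Hedged sketch: change the hotplug onlining policy at runtime by
     * writing "online" to auto_online_blocks. Requires root and
     * CONFIG_MEMORY_HOTPLUG.
     */
    #include <stdio.h>

    int main(void)
    {
            const char *path =
                    "/sys/devices/system/memory/auto_online_blocks";
            FILE *f = fopen(path, "w");

            if (!f) {
                    perror(path);
                    return 1;
            }
            if (fputs("online", f) == EOF)
                    perror("write policy");
            fclose(f);
            return 0;
    }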
573 
574 config MEMORY_HOTREMOVE
575         bool "Allow for memory hot remove"
576         select HAVE_BOOTMEM_INFO_NODE if (X86_64 || PPC64)
577         depends on MEMORY_HOTPLUG && ARCH_ENABLE_MEMORY_HOTREMOVE
578         depends on MIGRATION
579 
580 config MHP_MEMMAP_ON_MEMORY
581         def_bool y
582         depends on MEMORY_HOTPLUG && SPARSEMEM_VMEMMAP
583         depends on ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE
584 
585 endif # MEMORY_HOTPLUG
586 
587 config ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE
588        bool
589 
590 # Heavily threaded applications may benefit from splitting the mm-wide
591 # page_table_lock, so that faults on different parts of the user address
592 # space can be handled with less contention: split it at this NR_CPUS.
593 # Default to 4 for wider testing, though 8 might be more appropriate.
594 # ARM's adjust_pte (unused if VIPT) depends on mm-wide page_table_lock.
595 # PA-RISC 7xxx's spinlock_t would enlarge struct page from 32 to 44 bytes.
596 # SPARC32 allocates multiple pte tables within a single page, and therefore
597 # a per-page lock leads to problems when multiple tables need to be locked
598 # at the same time (e.g. copy_page_range()).
599 # DEBUG_SPINLOCK and DEBUG_LOCK_ALLOC spinlock_t also enlarge struct page.
600 #
601 config SPLIT_PTLOCK_CPUS
602         int
603         default "999999" if !MMU
604         default "999999" if ARM && !CPU_CACHE_VIPT
605         default "999999" if PARISC && !PA20
606         default "999999" if SPARC32
607         default "4"
608 
609 config ARCH_ENABLE_SPLIT_PMD_PTLOCK
610         bool
611 
612 #
613 # support for memory balloon
614 config MEMORY_BALLOON
615         bool
616 
617 #
618 # support for memory balloon compaction
619 config BALLOON_COMPACTION
620         bool "Allow for balloon memory compaction/migration"
621         default y
622         depends on COMPACTION && MEMORY_BALLOON
623         help
624           Memory fragmentation introduced by ballooning might significantly
625           reduce the number of 2MB contiguous memory blocks that can be used
626           within a guest, thus imposing performance penalties associated with
627           the reduced number of transparent huge pages that could be used by
628           the guest workload. Allowing compaction & migration of memory pages
629           enlisted as part of memory balloon devices avoids the aforementioned
630           scenario and helps improve memory defragmentation.
631 
632 #
633 # support for memory compaction
634 config COMPACTION
635         bool "Allow for memory compaction"
636         default y
637         select MIGRATION
638         depends on MMU
639         help
640           Compaction is the only memory management component to form
641           high order (larger physically contiguous) memory blocks
642           reliably. The page allocator relies on compaction heavily and
643           the lack of the feature can lead to unexpected OOM killer
644           invocations for high order memory requests. You shouldn't
645           disable this option unless there really is a strong reason for
646           it; if you do, we would be really interested to hear about it at
647           linux-mm@kvack.org.
648 
649 config COMPACT_UNEVICTABLE_DEFAULT
650         int
651         depends on COMPACTION
652         default 0 if PREEMPT_RT
653         default 1
654 
655 #
656 # support for free page reporting
657 config PAGE_REPORTING
658         bool "Free page reporting"
659         help
660           Free page reporting allows for the incremental acquisition of
661           free pages from the buddy allocator for the purpose of reporting
662           those pages to another entity, such as a hypervisor, so that the
663           memory can be freed within the host for other uses.
664 
665 #
666 # support for page migration
667 #
668 config MIGRATION
669         bool "Page migration"
670         default y
671         depends on (NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION || CMA) && MMU
672         help
673           Allows the migration of the physical location of pages of processes
674           while the virtual addresses are not changed. This is useful in
675           two situations. The first is on NUMA systems to put pages nearer
676           to the processors accessing them. The second is when allocating huge
677           pages as migration can relocate pages to satisfy a huge page
678           allocation instead of reclaiming.
679 
680 config DEVICE_MIGRATION
681         def_bool MIGRATION && ZONE_DEVICE
682 
683 config ARCH_ENABLE_HUGEPAGE_MIGRATION
684         bool
685 
686 config ARCH_ENABLE_THP_MIGRATION
687         bool
688 
689 config HUGETLB_PAGE_SIZE_VARIABLE
690         def_bool n
691         help
692           Allows the pageblock_order value to be dynamic instead of just standard
693           HUGETLB_PAGE_ORDER when there are multiple HugeTLB page sizes available
694           on a platform.
695 
696           Note that the pageblock_order cannot exceed MAX_PAGE_ORDER and will be
697           clamped down to MAX_PAGE_ORDER.
698 
699 config CONTIG_ALLOC
700         def_bool (MEMORY_ISOLATION && COMPACTION) || CMA
701 
702 config PCP_BATCH_SCALE_MAX
703         int "Maximum scale factor of PCP (Per-CPU pageset) batch allocate/free"
704         default 5
705         range 0 6
706         help
707           In the page allocator, PCP (Per-CPU pageset) is refilled and drained
708           in batches.  The batch number is scaled automatically to improve page
709           allocation/free throughput.  But a too large scale factor may hurt
710           latency.  This option sets the upper limit of the scale factor to limit
711           the maximum latency.
712 
713 config PHYS_ADDR_T_64BIT
714         def_bool 64BIT
715 
716 config BOUNCE
717         bool "Enable bounce buffers"
718         default y
719         depends on BLOCK && MMU && HIGHMEM
720         help
721           Enable bounce buffers for devices that cannot access the full range of
722           memory available to the CPU. Enabled by default when HIGHMEM is
723           selected, but you may say n to override this.
724 
725 config MMU_NOTIFIER
726         bool
727         select INTERVAL_TREE
728 
729 config KSM
730         bool "Enable KSM for page merging"
731         depends on MMU
732         select XXHASH
733         help
734           Enable Kernel Samepage Merging: KSM periodically scans those areas
735           of an application's address space that an app has advised may be
736           mergeable.  When it finds pages of identical content, it replaces
737           the many instances by a single page with that content, so
738           saving memory until one or another app needs to modify the content.
739           Recommended for use with KVM, or with other duplicative applications.
740           See Documentation/mm/ksm.rst for more information: KSM is inactive
741           until a program has madvised that an area is MADV_MERGEABLE, and
742           root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
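
The userspace side of the madvise handshake described above might look like
the following hedged sketch; the region is only actually merged once ksmd is
running (root writes 1 to /sys/kernel/mm/ksm/run):

    /*
     * Hedged sketch: mark an anonymous region as a KSM merge candidate.
     * KSM only scans it after root writes 1 to /sys/kernel/mm/ksm/run.
     */
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            size_t len = 64 << 20;  /* 64 MiB of identical pages */
            void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (p == MAP_FAILED) {
                    perror("mmap");
                    return 1;
            }
            memset(p, 0x5a, len);   /* identical content, good candidates */
            if (madvise(p, len, MADV_MERGEABLE))
                    perror("madvise(MADV_MERGEABLE)");  /* e.g. CONFIG_KSM=n */
            pause();        /* keep the mapping around so ksmd can scan it */
            return 0;
    }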
743 
744 config DEFAULT_MMAP_MIN_ADDR
745         int "Low address space to protect from user allocation"
746         depends on MMU
747         default 4096
748         help
749           This is the portion of low virtual memory which should be protected
750           from userspace allocation.  Keeping a user from writing to low pages
751           can help reduce the impact of kernel NULL pointer bugs.
752 
753           For most arm64, ppc64 and x86 users with lots of address space
754           a value of 65536 is reasonable and should cause no problems.
755           On arm and other archs it should not be higher than 32768.
756           Programs which use vm86 functionality or have some need to map
757           this low address space will need CAP_SYS_RAWIO or to disable this
758           protection by setting the value to 0.
759 
760           This value can be changed after boot using the
761           /proc/sys/vm/mmap_min_addr tunable.
762 
763 config ARCH_SUPPORTS_MEMORY_FAILURE
764         bool
765 
766 config MEMORY_FAILURE
767         depends on MMU
768         depends on ARCH_SUPPORTS_MEMORY_FAILURE
769         bool "Enable recovery from hardware memory errors"
770         select MEMORY_ISOLATION
771         select RAS
772         help
773           Enables code to recover from some memory failures on systems
774           with MCA recovery. This allows a system to continue running
775           even when some of its memory has uncorrected errors. This requires
776           special hardware support and typically ECC memory.
777 
778 config HWPOISON_INJECT
779         tristate "HWPoison pages injector"
780         depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
781         select PROC_PAGE_MONITOR
782 
783 config NOMMU_INITIAL_TRIM_EXCESS
784         int "Turn on mmap() excess space trimming before booting"
785         depends on !MMU
786         default 1
787         help
788           The NOMMU mmap() frequently needs to allocate large contiguous chunks
789           of memory on which to store mappings, but it can only ask the system
790           allocator for chunks in 2^N*PAGE_SIZE amounts - which is frequently
791           more than it requires.  To deal with this, mmap() is able to trim off
792           the excess and return it to the allocator.
793 
794           If trimming is enabled, the excess is trimmed off and returned to the
795           system allocator, which can cause extra fragmentation, particularly
796           if there are a lot of transient processes.
797 
798           If trimming is disabled, the excess is kept, but not used, which for
799           long-term mappings means that the space is wasted.
800 
801           Trimming can be dynamically controlled through a sysctl option
802           (/proc/sys/vm/nr_trim_pages) which specifies the minimum number of
803           excess pages there must be before trimming should occur, or zero if
804           no trimming is to occur.
805 
806           This option specifies the initial value of that sysctl.  The default
807           of 1 says that all excess pages should be trimmed.
808 
809           See Documentation/admin-guide/mm/nommu-mmap.rst for more information.
810 
811 config ARCH_WANT_GENERAL_HUGETLB
812         bool
813 
814 config ARCH_WANTS_THP_SWAP
815         def_bool n
816 
817 menuconfig TRANSPARENT_HUGEPAGE
818         bool "Transparent Hugepage Support"
819         depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE && !PREEMPT_RT
820         select COMPACTION
821         select XARRAY_MULTI
822         help
823           Transparent Hugepages allows the kernel to use huge pages and
824           huge tlb transparently to the applications whenever possible.
825           This feature can improve computing performance for certain
826           applications by speeding up page faults during memory
827           allocation, by reducing the number of tlb misses and by speeding
828           up the pagetable walking.
829 
830           If memory is constrained on an embedded system, you may want to say N.
831 
832 if TRANSPARENT_HUGEPAGE
833 
834 choice
835         prompt "Transparent Hugepage Support sysfs defaults"
836         depends on TRANSPARENT_HUGEPAGE
837         default TRANSPARENT_HUGEPAGE_ALWAYS
838         help
839           Selects the sysfs defaults for Transparent Hugepage Support.
840 
841         config TRANSPARENT_HUGEPAGE_ALWAYS
842                 bool "always"
843         help
844           Enabling Transparent Hugepage always can increase the
845           memory footprint of applications without a guaranteed
846           benefit but it will work automatically for all applications.
847 
848         config TRANSPARENT_HUGEPAGE_MADVISE
849                 bool "madvise"
850         help
851           Enabling Transparent Hugepage madvise will only provide a
852           performance improvement benefit to the applications using
853           madvise(MADV_HUGEPAGE) but it won't risk increasing the
854           memory footprint of applications without a guaranteed
855           benefit.
856 
857         config TRANSPARENT_HUGEPAGE_NEVER
858                 bool "never"
859         help
860           Disable Transparent Hugepage by default. It can still be
861           enabled at runtime via sysfs.
862 endchoice
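
When the sysfs default above is "madvise", only regions that an application
explicitly advises are eligible for huge pages. A hedged C sketch of that
opt-in call; the 2 MiB size in the comment is an x86-64 assumption, as the
actual THP size is architecture dependent:

    /*
     * Hedged sketch: opt a region into THP when the sysfs policy is
     * "madvise". The kernel only backs the naturally aligned,
     * huge-page-sized parts of the range (commonly 2 MiB on x86-64)
     * with huge pages.
     */
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 256UL << 20;       /* 256 MiB */
            void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (p == MAP_FAILED) {
                    perror("mmap");
                    return 1;
            }
            if (madvise(p, len, MADV_HUGEPAGE))
                    perror("madvise(MADV_HUGEPAGE)");
            memset(p, 0, len);      /* touching the range lets THP kick in */
            /* /proc/self/smaps would now show AnonHugePages for it. */
            munmap(p, len);
            return 0;
    }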
863 
864 config THP_SWAP
865         def_bool y
866         depends on TRANSPARENT_HUGEPAGE && ARCH_WANTS_THP_SWAP && SWAP && 64BIT
867         help
868           Swap transparent huge pages in one piece, without splitting.
869           XXX: For now, the swap cluster backing a transparent huge page
870           will be split after swapout.
871 
872           For selection by architectures with reasonable THP sizes.
873 
874 config READ_ONLY_THP_FOR_FS
875         bool "Read-only THP for filesystems (EXPERIMENTAL)"
876         depends on TRANSPARENT_HUGEPAGE && SHMEM
877 
878         help
879           Allow khugepaged to put read-only file-backed pages in THP.
880 
881           This is marked experimental because it is a new feature. Write
882           support of file THPs will be developed in the next few release
883           cycles.
884 
885 endif # TRANSPARENT_HUGEPAGE
886 
887 #
888 # The architecture supports pgtable leaves that are larger than PAGE_SIZE
889 #
890 config PGTABLE_HAS_HUGE_LEAVES
891         def_bool TRANSPARENT_HUGEPAGE || HUGETLB_PAGE
892 
893 #
894 # UP and nommu archs use km based percpu allocator
895 #
896 config NEED_PER_CPU_KM
897         depends on !SMP || !MMU
898         bool
899         default y
900 
901 config NEED_PER_CPU_EMBED_FIRST_CHUNK
902         bool
903 
904 config NEED_PER_CPU_PAGE_FIRST_CHUNK
905         bool
906 
907 config USE_PERCPU_NUMA_NODE_ID
908         bool
909 
910 config HAVE_SETUP_PER_CPU_AREA
911         bool
912 
913 config CMA
914         bool "Contiguous Memory Allocator"
915         depends on MMU
916         select MIGRATION
917         select MEMORY_ISOLATION
918         help
919           This enables the Contiguous Memory Allocator which allows other
920           subsystems to allocate big physically-contiguous blocks of memory.
921           CMA reserves a region of memory and allows only movable pages to
922           be allocated from it. This way, the kernel can use the memory for
923           pagecache, and when a subsystem requests a contiguous area, the
924           allocated pages are migrated away to serve the contiguous request.
925 
926           If unsure, say "n".
927 
928 config CMA_DEBUGFS
929         bool "CMA debugfs interface"
930         depends on CMA && DEBUG_FS
931         help
932           Turns on the DebugFS interface for CMA.
933 
934 config CMA_SYSFS
935         bool "CMA information through sysfs interface"
936         depends on CMA && SYSFS
937         help
938           This option exposes some sysfs attributes to get information
939           from CMA.
940 
941 config CMA_AREAS
942         int "Maximum count of the CMA areas"
943         depends on CMA
944         default 20 if NUMA
945         default 8
946         help
947           CMA allows creating CMA areas for a particular purpose, mainly
948           used as device private areas. This parameter sets the maximum
949           number of CMA areas in the system.
950 
951           If unsure, leave the default value of "8" on UMA and "20" on NUMA.
952 
953 config MEM_SOFT_DIRTY
954         bool "Track memory changes"
955         depends on CHECKPOINT_RESTORE && HAVE_ARCH_SOFT_DIRTY && PROC_FS
956         select PROC_PAGE_MONITOR
957         help
958           This option enables memory change tracking by introducing a
959           soft-dirty bit on pte-s. This bit is set when someone writes
960           into a page just like the regular dirty bit, but unlike the latter
961           it can be cleared by hand.
962 
963           See Documentation/admin-guide/mm/soft-dirty.rst for more details.
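
The runtime interface behind this option is /proc/PID/clear_refs and
/proc/PID/pagemap: writing "4" to clear_refs clears the soft-dirty bits, and
bit 55 of each 64-bit pagemap entry then reports whether the page has been
written since. A hedged sketch for the calling process itself:

    /*
     * Hedged sketch: detect writes to a page via the soft-dirty bit.
     * Requires CONFIG_MEM_SOFT_DIRTY; pagemap access may need extra
     * privileges on kernels that restrict it.
     */
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    static int soft_dirty(int pagemap_fd, void *addr)
    {
            uint64_t entry;
            off_t off = ((uintptr_t)addr / sysconf(_SC_PAGESIZE)) *
                        sizeof(entry);

            if (pread(pagemap_fd, &entry, sizeof(entry), off) != sizeof(entry))
                    return -1;
            return (entry >> 55) & 1;       /* bit 55 = soft-dirty */
    }

    int main(void)
    {
            char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            int clear_fd = open("/proc/self/clear_refs", O_WRONLY);
            int pagemap_fd = open("/proc/self/pagemap", O_RDONLY);

            if (p == MAP_FAILED || clear_fd < 0 || pagemap_fd < 0)
                    return 1;
            p[0] = 1;                       /* make sure the page exists */
            write(clear_fd, "4", 1);        /* clear all soft-dirty bits */
            printf("after clear: %d\n", soft_dirty(pagemap_fd, p));
            p[0] = 2;                       /* dirty it again */
            printf("after write: %d\n", soft_dirty(pagemap_fd, p));
            return 0;
    }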
964 
965 config GENERIC_EARLY_IOREMAP
966         bool
967 
968 config STACK_MAX_DEFAULT_SIZE_MB
969         int "Default maximum user stack size for 32-bit processes (MB)"
970         default 100
971         range 8 2048
972         depends on STACK_GROWSUP && (!64BIT || COMPAT)
973         help
974           This is the maximum stack size in Megabytes in the VM layout of 32-bit
975           user processes when the stack grows upwards (currently only on parisc
976           arch) when the RLIMIT_STACK hard limit is unlimited.
977 
978           A sane initial value is 100 MB.
979 
980 config DEFERRED_STRUCT_PAGE_INIT
981         bool "Defer initialisation of struct pages to kthreads"
982         depends on SPARSEMEM
983         depends on !NEED_PER_CPU_KM
984         depends on 64BIT
985         depends on !KMSAN
986         select PADATA
987         help
988           Ordinarily all struct pages are initialised during early boot in a
989           single thread. On very large machines this can take a considerable
990           amount of time. If this option is set, large machines will bring up
991           a subset of memmap at boot and then initialise the rest in parallel.
992           This has a potential performance impact on tasks running early in the
993           lifetime of the system until these kthreads finish the
994           initialisation.
995 
996 config PAGE_IDLE_FLAG
997         bool
998         select PAGE_EXTENSION if !64BIT
999         help
1000           This adds PG_idle and PG_young flags to 'struct page'.  PTE Accessed
1001           bit writers can set the state of the bit in the flags so that PTE
1002           Accessed bit readers may avoid disturbance.
1003 
1004 config IDLE_PAGE_TRACKING
1005         bool "Enable idle page tracking"
1006         depends on SYSFS && MMU
1007         select PAGE_IDLE_FLAG
1008         help
1009           This feature allows estimating the number of user pages that have
1010           not been touched during a given period of time. This information can
1011           be useful to tune memory cgroup limits and/or for job placement
1012           within a compute cluster.
1013 
1014           See Documentation/admin-guide/mm/idle_page_tracking.rst for
1015           more details.
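
The interface is /sys/kernel/mm/page_idle/bitmap, a root-only bitmap indexed
by page frame number and accessed in 8-byte words. A hedged sketch that
marks one page idle and rechecks it after a while, with the PFN obtained
from /proc/self/pagemap:

    /*
     * Hedged sketch: mark one page idle and check it again later.
     * Needs root: PFNs come from /proc/self/pagemap and the idle bitmap
     * holds 64 pages per 8-byte word.
     */
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    static uint64_t pfn_of(void *addr)
    {
            uint64_t entry = 0;
            int fd = open("/proc/self/pagemap", O_RDONLY);

            if (fd < 0)
                    return 0;
            pread(fd, &entry, sizeof(entry),
                  ((uintptr_t)addr / sysconf(_SC_PAGESIZE)) * sizeof(entry));
            close(fd);
            /* bit 63 = present, bits 0-54 = PFN */
            return (entry >> 63) ? (entry & ((1ULL << 55) - 1)) : 0;
    }

    int main(void)
    {
            char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE, -1, 0);
            int fd = open("/sys/kernel/mm/page_idle/bitmap", O_RDWR);
            uint64_t pfn, word;
            off_t off;

            if (p == MAP_FAILED || fd < 0)
                    return 1;
            pfn = pfn_of(p);
            if (!pfn)
                    return 1;
            word = 1ULL << (pfn % 64);
            off = (pfn / 64) * sizeof(word);
            pwrite(fd, &word, sizeof(word), off);   /* set the Idle flag */
            sleep(60);                              /* observation window */
            pread(fd, &word, sizeof(word), off);
            printf("page %s idle\n",
                   (word & (1ULL << (pfn % 64))) ? "still" : "not");
            return 0;
    }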
1016 
1017 # Architectures which implement cpu_dcache_is_aliasing() to query
1018 # whether the data caches are aliased (VIVT or VIPT with dcache
1019 # aliasing) need to select this.
1020 config ARCH_HAS_CPU_CACHE_ALIASING
1021         bool
1022 
1023 config ARCH_HAS_CACHE_LINE_SIZE
1024         bool
1025 
1026 config ARCH_HAS_CURRENT_STACK_POINTER
1027         bool
1028         help
1029           In support of HARDENED_USERCOPY performing stack variable lifetime
1030           checking, an architecture-agnostic way to find the stack pointer
1031           is needed. Once an architecture defines an unsigned long global
1032           register alias named "current_stack_pointer", this config can be
1033           selected.
1034 
1035 config ARCH_HAS_PTE_DEVMAP
1036         bool
1037 
1038 config ARCH_HAS_ZONE_DMA_SET
1039         bool
1040 
1041 config ZONE_DMA
1042         bool "Support DMA zone" if ARCH_HAS_ZONE_DMA_SET
1043         default y if ARM64 || X86
1044 
1045 config ZONE_DMA32
1046         bool "Support DMA32 zone" if ARCH_HAS_ZONE_DMA_SET
1047         depends on !X86_32
1048         default y if ARM64
1049 
1050 config ZONE_DEVICE
1051         bool "Device memory (pmem, HMM, etc...) hotplug support"
1052         depends on MEMORY_HOTPLUG
1053         depends on MEMORY_HOTREMOVE
1054         depends on SPARSEMEM_VMEMMAP
1055         depends on ARCH_HAS_PTE_DEVMAP
1056         select XARRAY_MULTI
1057 
1058         help
1059           Device memory hotplug support allows for establishing pmem,
1060           or other device driver discovered memory regions, in the
1061           memmap. This allows pfn_to_page() lookups of otherwise
1062           "device-physical" addresses which is needed for using a DAX
1063           mapping in an O_DIRECT operation, among other things.
1064 
1065           If FS_DAX is enabled, then say Y.
1066 
1067 #
1068 # Helpers to mirror range of the CPU page tables of a process into device page
1069 # tables.
1070 #
1071 config HMM_MIRROR
1072         bool
1073         depends on MMU
1074 
1075 config GET_FREE_REGION
1076         depends on SPARSEMEM
1077         bool
1078 
1079 config DEVICE_PRIVATE
1080         bool "Unaddressable device memory (GPU memory, ...)"
1081         depends on ZONE_DEVICE
1082         select GET_FREE_REGION
1083 
1084         help
1085           Allows creation of struct pages to represent unaddressable device
1086           memory; i.e., memory that is only accessible from the device (or
1087           group of devices). You likely also want to select HMM_MIRROR.
1088 
1089 config VMAP_PFN
1090         bool
1091 
1092 config ARCH_USES_HIGH_VMA_FLAGS
1093         bool
1094 config ARCH_HAS_PKEYS
1095         bool
1096 
1097 config ARCH_USES_PG_ARCH_X
1098         bool
1099         help
1100           Enable the definition of PG_arch_x page flags with x > 1. Only
1101           suitable for 64-bit architectures with CONFIG_FLATMEM or
1102           CONFIG_SPARSEMEM_VMEMMAP enabled, otherwise there may not be
1103           enough room for additional bits in page->flags.
1104 
1105 config VM_EVENT_COUNTERS
1106         default y
1107         bool "Enable VM event counters for /proc/vmstat" if EXPERT
1108         help
1109           VM event counters are needed for event counts to be shown.
1110           This option allows the disabling of the VM event counters
1111           on EXPERT systems.  /proc/vmstat will only show page counts
1112           if VM event counters are disabled.
1113 
1114 config PERCPU_STATS
1115         bool "Collect percpu memory statistics"
1116         help
1117           This feature collects and exposes statistics via debugfs. The
1118           information includes global and per chunk statistics, which can
1119           be used to help understand percpu memory usage.
1120 
1121 config GUP_TEST
1122         bool "Enable infrastructure for get_user_pages()-related unit tests"
1123         depends on DEBUG_FS
1124         help
1125           Provides /sys/kernel/debug/gup_test, which in turn provides a way
1126           to make ioctl calls that can launch kernel-based unit tests for
1127           the get_user_pages*() and pin_user_pages*() family of API calls.
1128 
1129           These tests include benchmark testing of the _fast variants of
1130           get_user_pages*() and pin_user_pages*(), as well as smoke tests of
1131           the non-_fast variants.
1132 
1133           There is also a sub-test that allows running dump_page() on any
1134           of up to eight pages (selected by command line args) within the
1135           range of user-space addresses. These pages are either pinned via
1136           pin_user_pages*(), or pinned via get_user_pages*(), as specified
1137           by other command line arguments.
1138 
1139           See tools/testing/selftests/mm/gup_test.c
1140 
1141 comment "GUP_TEST needs to have DEBUG_FS enabled"
1142         depends on !GUP_TEST && !DEBUG_FS
1143 
1144 config GUP_GET_PXX_LOW_HIGH
1145         bool
1146 
1147 config DMAPOOL_TEST
1148         tristate "Enable a module to run time tests on dma_pool"
1149         depends on HAS_DMA
1150         help
1151           Provides a test module that will allocate and free many blocks of
1152           various sizes and report how long it takes. This is intended to
1153           provide a consistent way to measure how changes to the
1154           dma_pool_alloc/free routines affect performance.
1155 
1156 config ARCH_HAS_PTE_SPECIAL
1157         bool
1158 
1159 config MAPPING_DIRTY_HELPERS
1160         bool
1161 
1162 config KMAP_LOCAL
1163         bool
1164 
1165 config KMAP_LOCAL_NON_LINEAR_PTE_ARRAY
1166         bool
1167 
1168 # struct io_mapping based helper.  Selected by drivers that need them
1169 config IO_MAPPING
1170         bool
1171 
1172 config MEMFD_CREATE
1173         bool "Enable memfd_create() system call" if EXPERT
1174 
1175 config SECRETMEM
1176         default y
1177         bool "Enable memfd_secret() system call" if EXPERT
1178         depends on ARCH_HAS_SET_DIRECT_MAP
1179         help
1180           Enable the memfd_secret() system call with the ability to create
1181           memory areas visible only in the context of the owning process and
1182           not mapped to other processes and other kernel page tables.
1183 
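
A hedged sketch of the call sequence: memfd_secret() currently has no glibc
wrapper, so it is invoked through syscall(); the fd is then sized and mapped
like an ordinary memfd_create() fd. The fallback syscall number below is the
asm-generic one, and on some kernels the syscall must additionally be
enabled with the secretmem.enable=1 boot parameter:

    /*
     * Hedged sketch: create a secret memory area with memfd_secret().
     * The fallback syscall number is an assumption (asm-generic value).
     */
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #ifndef __NR_memfd_secret
    #define __NR_memfd_secret 447
    #endif

    int main(void)
    {
            size_t len = 4096;
            int fd = syscall(__NR_memfd_secret, 0);
            void *p;

            if (fd < 0) {
                    perror("memfd_secret");  /* e.g. CONFIG_SECRETMEM=n */
                    return 1;
            }
            ftruncate(fd, len);
            p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            if (p == MAP_FAILED) {
                    perror("mmap");
                    return 1;
            }
            strcpy(p, "not visible to other processes");
            munmap(p, len);
            close(fd);
            return 0;
    }
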
1184 config ANON_VMA_NAME
1185         bool "Anonymous VMA name support"
1186         depends on PROC_FS && ADVISE_SYSCALLS && MMU
1187 
1188         help
1189           Allow naming anonymous virtual memory areas.
1190 
1191           This feature allows assigning names to virtual memory areas. Assigned
1192           names can be later retrieved from /proc/pid/maps and /proc/pid/smaps
1193           and help identify individual anonymous memory areas.
1194           Assigning a name to an anonymous virtual memory area might prevent that
1195           area from being merged with adjacent virtual memory areas due to the
1196           difference in their name.
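
A hedged sketch of the naming call via prctl(); the constants come from
<linux/prctl.h> on recent kernels and the fallback values mirror them:

    /*
     * Hedged sketch: name an anonymous mapping so it shows up in
     * /proc/self/maps as [anon:my pool].
     */
    #include <stdio.h>
    #include <sys/mman.h>
    #include <sys/prctl.h>

    #ifndef PR_SET_VMA
    #define PR_SET_VMA              0x53564d41
    #define PR_SET_VMA_ANON_NAME    0
    #endif

    int main(void)
    {
            size_t len = 1 << 20;
            void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (p == MAP_FAILED) {
                    perror("mmap");
                    return 1;
            }
            if (prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, p, len, "my pool"))
                    perror("PR_SET_VMA_ANON_NAME"); /* CONFIG_ANON_VMA_NAME=n? */
            /* "cat /proc/self/maps" would now show the name for this VMA. */
            getchar();      /* keep the mapping alive for inspection */
            return 0;
    }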
1197 
1198 config HAVE_ARCH_USERFAULTFD_WP
1199         bool
1200         help
1201           Arch has userfaultfd write protection support
1202 
1203 config HAVE_ARCH_USERFAULTFD_MINOR
1204         bool
1205         help
1206           Arch has userfaultfd minor fault support
1207 
1208 menuconfig USERFAULTFD
1209         bool "Enable userfaultfd() system call"
1210         depends on MMU
1211         help
1212           Enable the userfaultfd() system call that allows userspace to
1213           intercept and handle page faults.
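
A hedged sketch of the setup half: create the descriptor, perform the
UFFDIO_API handshake, and register an address range for missing-page faults.
A complete user would also run a thread that read()s fault events from the
descriptor and resolves them with UFFDIO_COPY:

    /* Hedged sketch: userfaultfd registration only; no handler thread. */
    #include <fcntl.h>
    #include <linux/userfaultfd.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
            size_t len = 1 << 20;
            int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
            struct uffdio_api api = { .api = UFFD_API };
            struct uffdio_register reg;
            void *area;

            if (uffd < 0 || ioctl(uffd, UFFDIO_API, &api)) {
                    perror("userfaultfd"); /* may be restricted by sysctl */
                    return 1;
            }
            area = mmap(NULL, len, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (area == MAP_FAILED) {
                    perror("mmap");
                    return 1;
            }
            reg.range.start = (unsigned long)area;
            reg.range.len = len;
            reg.mode = UFFDIO_REGISTER_MODE_MISSING;
            if (ioctl(uffd, UFFDIO_REGISTER, &reg)) {
                    perror("UFFDIO_REGISTER");
                    return 1;
            }
            /*
             * From here on, the first touch of any page in 'area' produces
             * an event on 'uffd' instead of being transparently filled with
             * zeroes; a handler thread would reply with UFFDIO_COPY.
             */
            printf("registered %zu bytes at %p\n", len, area);
            return 0;
    }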
1214 
1215 if USERFAULTFD
1216 config PTE_MARKER_UFFD_WP
1217         bool "Userfaultfd write protection support for shmem/hugetlbfs"
1218         default y
1219         depends on HAVE_ARCH_USERFAULTFD_WP
1220 
1221         help
1222           Allows creating marker PTEs for userfaultfd write protection
1223           purposes.  It is required to enable userfaultfd write protection on
1224           file-backed memory types like shmem and hugetlbfs.
1225 endif # USERFAULTFD
1226 
1227 # multi-gen LRU {
1228 config LRU_GEN
1229         bool "Multi-Gen LRU"
1230         depends on MMU
1231         # make sure folio->flags has enough spare bits
1232         depends on 64BIT || !SPARSEMEM || SPARSEMEM_VMEMMAP
1233         help
1234           A high performance LRU implementation to overcommit memory. See
1235           Documentation/admin-guide/mm/multigen_lru.rst for details.
1236 
1237 config LRU_GEN_ENABLED
1238         bool "Enable by default"
1239         depends on LRU_GEN
1240         help
1241           This option enables the multi-gen LRU by default.
1242 
1243 config LRU_GEN_STATS
1244         bool "Full stats for debugging"
1245         depends on LRU_GEN
1246         help
1247           Do not enable this option unless you plan to look at historical stats
1248           from evicted generations for debugging purposes.
1249 
1250           This option has a per-memcg and per-node memory overhead.
1251 
1252 config LRU_GEN_WALKS_MMU
1253         def_bool y
1254         depends on LRU_GEN && ARCH_HAS_HW_PTE_YOUNG
1255 # }
1256 
1257 config ARCH_SUPPORTS_PER_VMA_LOCK
1258        def_bool n
1259 
1260 config PER_VMA_LOCK
1261         def_bool y
1262         depends on ARCH_SUPPORTS_PER_VMA_LOCK && MMU && SMP
1263         help
1264           Allow per-vma locking during page fault handling.
1265 
1266           This feature allows locking each virtual memory area separately when
1267           handling page faults instead of taking mmap_lock.
1268 
1269 config LOCK_MM_AND_FIND_VMA
1270         bool
1271         depends on !STACK_GROWSUP
1272 
1273 config IOMMU_MM_DATA
1274         bool
1275 
1276 config EXECMEM
1277         bool
1278 
1279 source "mm/damon/Kconfig"
1280 
1281 endmenu
