TOMOYO Linux Cross Reference
Linux/kernel/module/main.c

  1 // SPDX-License-Identifier: GPL-2.0-or-later
  2 /*
  3  * Copyright (C) 2002 Richard Henderson
  4  * Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.
  5  * Copyright (C) 2023 Luis Chamberlain <mcgrof@kernel.org>
  6  */
  7 
  8 #define INCLUDE_VERMAGIC
  9 
 10 #include <linux/export.h>
 11 #include <linux/extable.h>
 12 #include <linux/moduleloader.h>
 13 #include <linux/module_signature.h>
 14 #include <linux/trace_events.h>
 15 #include <linux/init.h>
 16 #include <linux/kallsyms.h>
 17 #include <linux/buildid.h>
 18 #include <linux/fs.h>
 19 #include <linux/kernel.h>
 20 #include <linux/kernel_read_file.h>
 21 #include <linux/kstrtox.h>
 22 #include <linux/slab.h>
 23 #include <linux/vmalloc.h>
 24 #include <linux/elf.h>
 25 #include <linux/seq_file.h>
 26 #include <linux/syscalls.h>
 27 #include <linux/fcntl.h>
 28 #include <linux/rcupdate.h>
 29 #include <linux/capability.h>
 30 #include <linux/cpu.h>
 31 #include <linux/moduleparam.h>
 32 #include <linux/errno.h>
 33 #include <linux/err.h>
 34 #include <linux/vermagic.h>
 35 #include <linux/notifier.h>
 36 #include <linux/sched.h>
 37 #include <linux/device.h>
 38 #include <linux/string.h>
 39 #include <linux/mutex.h>
 40 #include <linux/rculist.h>
 41 #include <linux/uaccess.h>
 42 #include <asm/cacheflush.h>
 43 #include <linux/set_memory.h>
 44 #include <asm/mmu_context.h>
 45 #include <linux/license.h>
 46 #include <asm/sections.h>
 47 #include <linux/tracepoint.h>
 48 #include <linux/ftrace.h>
 49 #include <linux/livepatch.h>
 50 #include <linux/async.h>
 51 #include <linux/percpu.h>
 52 #include <linux/kmemleak.h>
 53 #include <linux/jump_label.h>
 54 #include <linux/pfn.h>
 55 #include <linux/bsearch.h>
 56 #include <linux/dynamic_debug.h>
 57 #include <linux/audit.h>
 58 #include <linux/cfi.h>
 59 #include <linux/codetag.h>
 60 #include <linux/debugfs.h>
 61 #include <linux/execmem.h>
 62 #include <uapi/linux/module.h>
 63 #include "internal.h"
 64 
 65 #define CREATE_TRACE_POINTS
 66 #include <trace/events/module.h>
 67 #include <linux/ccsecurity.h>
 68 
 69 /*
 70  * Mutex protects:
 71  * 1) List of modules (also safely readable with preempt_disable),
 72  * 2) module_use links,
 73  * 3) mod_tree.addr_min/mod_tree.addr_max.
  74  * (delete and add use RCU list operations).
 75  */
 76 DEFINE_MUTEX(module_mutex);
 77 LIST_HEAD(modules);
 78 
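/*
 * Editor's sketch (not part of upstream main.c): the locking comment above
 * allows two traversal styles -- hold module_mutex, or disable preemption
 * and use the RCU list primitives, as find_module_all() below does. A
 * minimal mutex-side walker (hypothetical helper) could look like this:
 */
static int example_count_live_modules(void)
{
        struct module *mod;
        int n = 0;

        mutex_lock(&module_mutex);
        list_for_each_entry(mod, &modules, list) {
                if (mod->state != MODULE_STATE_UNFORMED)
                        n++;
        }
        mutex_unlock(&module_mutex);
        return n;
}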
 79 /* Work queue for freeing init sections in success case */
 80 static void do_free_init(struct work_struct *w);
 81 static DECLARE_WORK(init_free_wq, do_free_init);
 82 static LLIST_HEAD(init_free_list);
 83 
 84 struct mod_tree_root mod_tree __cacheline_aligned = {
 85         .addr_min = -1UL,
 86 };
 87 
 88 struct symsearch {
 89         const struct kernel_symbol *start, *stop;
 90         const s32 *crcs;
 91         enum mod_license license;
 92 };
 93 
 94 /*
 95  * Bounds of module memory, for speeding up __module_address.
 96  * Protected by module_mutex.
 97  */
 98 static void __mod_update_bounds(enum mod_mem_type type __maybe_unused, void *base,
 99                                 unsigned int size, struct mod_tree_root *tree)
100 {
101         unsigned long min = (unsigned long)base;
102         unsigned long max = min + size;
103 
104 #ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
105         if (mod_mem_type_is_core_data(type)) {
106                 if (min < tree->data_addr_min)
107                         tree->data_addr_min = min;
108                 if (max > tree->data_addr_max)
109                         tree->data_addr_max = max;
110                 return;
111         }
112 #endif
113         if (min < tree->addr_min)
114                 tree->addr_min = min;
115         if (max > tree->addr_max)
116                 tree->addr_max = max;
117 }
118 
119 static void mod_update_bounds(struct module *mod)
120 {
121         for_each_mod_mem_type(type) {
122                 struct module_memory *mod_mem = &mod->mem[type];
123 
124                 if (mod_mem->size)
125                         __mod_update_bounds(type, mod_mem->base, mod_mem->size, &mod_tree);
126         }
127 }
128 
129 /* Block module loading/unloading? */
130 int modules_disabled;
131 core_param(nomodule, modules_disabled, bint, 0);
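/*
 * Editor's note: core_param() above registers "nomodule" as a boot
 * parameter, so booting with "nomodule" on the kernel command line sets
 * modules_disabled, after which module loading and delete_module() below
 * fail with -EPERM regardless of capabilities.
 */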
132 
133 /* Waiting for a module to finish initializing? */
134 static DECLARE_WAIT_QUEUE_HEAD(module_wq);
135 
136 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
137 
138 int register_module_notifier(struct notifier_block *nb)
139 {
140         return blocking_notifier_chain_register(&module_notify_list, nb);
141 }
142 EXPORT_SYMBOL(register_module_notifier);
143 
144 int unregister_module_notifier(struct notifier_block *nb)
145 {
146         return blocking_notifier_chain_unregister(&module_notify_list, nb);
147 }
148 EXPORT_SYMBOL(unregister_module_notifier);
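/*
 * Editor's sketch (hypothetical names): a consumer of the two helpers above
 * supplies a notifier_block whose callback receives the MODULE_STATE_*
 * transition as @action and the module itself as @data, matching the
 * blocking_notifier_call_chain() calls later in this file:
 */
static int example_module_event(struct notifier_block *nb,
                                unsigned long action, void *data)
{
        struct module *mod = data;

        if (action == MODULE_STATE_COMING)
                pr_info("%s is coming\n", mod->name);
        return NOTIFY_OK;
}

static struct notifier_block example_module_nb = {
        .notifier_call = example_module_event,
};
/* ... then register_module_notifier(&example_module_nb); ... */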
149 
150 /*
151  * We require a truly strong try_module_get(): 0 means success.
152  * Otherwise an error is returned due to ongoing or failed
153  * initialization etc.
154  */
155 static inline int strong_try_module_get(struct module *mod)
156 {
157         BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED);
158         if (mod && mod->state == MODULE_STATE_COMING)
159                 return -EBUSY;
160         if (try_module_get(mod))
161                 return 0;
162         else
163                 return -ENOENT;
164 }
165 
166 static inline void add_taint_module(struct module *mod, unsigned flag,
167                                     enum lockdep_ok lockdep_ok)
168 {
169         add_taint(flag, lockdep_ok);
170         set_bit(flag, &mod->taints);
171 }
172 
173 /*
174  * A thread that wants to hold a reference to a module only while it
175  * is running can call this to safely exit.
176  */
177 void __noreturn __module_put_and_kthread_exit(struct module *mod, long code)
178 {
179         module_put(mod);
180         kthread_exit(code);
181 }
182 EXPORT_SYMBOL(__module_put_and_kthread_exit);
183 
184 /* Find a module section: 0 means not found. */
185 static unsigned int find_sec(const struct load_info *info, const char *name)
186 {
187         unsigned int i;
188 
189         for (i = 1; i < info->hdr->e_shnum; i++) {
190                 Elf_Shdr *shdr = &info->sechdrs[i];
191                 /* Alloc bit cleared means "ignore it." */
192                 if ((shdr->sh_flags & SHF_ALLOC)
193                     && strcmp(info->secstrings + shdr->sh_name, name) == 0)
194                         return i;
195         }
196         return 0;
197 }
198 
199 /* Find a module section, or NULL. */
200 static void *section_addr(const struct load_info *info, const char *name)
201 {
202         /* Section 0 has sh_addr 0. */
203         return (void *)info->sechdrs[find_sec(info, name)].sh_addr;
204 }
205 
206 /* Find a module section, or NULL.  Fill in number of "objects" in section. */
207 static void *section_objs(const struct load_info *info,
208                           const char *name,
209                           size_t object_size,
210                           unsigned int *num)
211 {
212         unsigned int sec = find_sec(info, name);
213 
214         /* Section 0 has sh_addr 0 and sh_size 0. */
215         *num = info->sechdrs[sec].sh_size / object_size;
216         return (void *)info->sechdrs[sec].sh_addr;
217 }
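/*
 * Editor's sketch: section_objs() is how the loader pulls a typed array out
 * of a named ELF section; the kernel-parameter table, for instance, is
 * fetched elsewhere in the loader with the same pattern shown below. A
 * missing section resolves to section 0 (sh_addr == 0, sh_size == 0), so
 * callers get a NULL pointer and *num == 0 without an explicit error check.
 */
static void example_grab_params(struct module *mod, struct load_info *info)
{
        mod->kp = section_objs(info, "__param",
                               sizeof(*mod->kp), &mod->num_kp);
}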
218 
219 /* Find a module section: 0 means not found. Ignores SHF_ALLOC flag. */
220 static unsigned int find_any_sec(const struct load_info *info, const char *name)
221 {
222         unsigned int i;
223 
224         for (i = 1; i < info->hdr->e_shnum; i++) {
225                 Elf_Shdr *shdr = &info->sechdrs[i];
226                 if (strcmp(info->secstrings + shdr->sh_name, name) == 0)
227                         return i;
228         }
229         return 0;
230 }
231 
232 /*
233  * Find a module section, or NULL. Fill in number of "objects" in section.
234  * Ignores SHF_ALLOC flag.
235  */
236 static __maybe_unused void *any_section_objs(const struct load_info *info,
237                                              const char *name,
238                                              size_t object_size,
239                                              unsigned int *num)
240 {
241         unsigned int sec = find_any_sec(info, name);
242 
243         /* Section 0 has sh_addr 0 and sh_size 0. */
244         *num = info->sechdrs[sec].sh_size / object_size;
245         return (void *)info->sechdrs[sec].sh_addr;
246 }
247 
248 #ifndef CONFIG_MODVERSIONS
249 #define symversion(base, idx) NULL
250 #else
251 #define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
252 #endif
253 
254 static const char *kernel_symbol_name(const struct kernel_symbol *sym)
255 {
256 #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
257         return offset_to_ptr(&sym->name_offset);
258 #else
259         return sym->name;
260 #endif
261 }
262 
263 static const char *kernel_symbol_namespace(const struct kernel_symbol *sym)
264 {
265 #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
266         if (!sym->namespace_offset)
267                 return NULL;
268         return offset_to_ptr(&sym->namespace_offset);
269 #else
270         return sym->namespace;
271 #endif
272 }
273 
274 int cmp_name(const void *name, const void *sym)
275 {
276         return strcmp(name, kernel_symbol_name(sym));
277 }
278 
279 static bool find_exported_symbol_in_section(const struct symsearch *syms,
280                                             struct module *owner,
281                                             struct find_symbol_arg *fsa)
282 {
283         struct kernel_symbol *sym;
284 
285         if (!fsa->gplok && syms->license == GPL_ONLY)
286                 return false;
287 
288         sym = bsearch(fsa->name, syms->start, syms->stop - syms->start,
289                         sizeof(struct kernel_symbol), cmp_name);
290         if (!sym)
291                 return false;
292 
293         fsa->owner = owner;
294         fsa->crc = symversion(syms->crcs, sym - syms->start);
295         fsa->sym = sym;
296         fsa->license = syms->license;
297 
298         return true;
299 }
300 
301 /*
 302  * Find an exported symbol and return it, along with (optional) crc and
303  * (optional) module which owns it.  Needs preempt disabled or module_mutex.
304  */
305 bool find_symbol(struct find_symbol_arg *fsa)
306 {
307         static const struct symsearch arr[] = {
308                 { __start___ksymtab, __stop___ksymtab, __start___kcrctab,
309                   NOT_GPL_ONLY },
310                 { __start___ksymtab_gpl, __stop___ksymtab_gpl,
311                   __start___kcrctab_gpl,
312                   GPL_ONLY },
313         };
314         struct module *mod;
315         unsigned int i;
316 
317         module_assert_mutex_or_preempt();
318 
319         for (i = 0; i < ARRAY_SIZE(arr); i++)
320                 if (find_exported_symbol_in_section(&arr[i], NULL, fsa))
321                         return true;
322 
323         list_for_each_entry_rcu(mod, &modules, list,
324                                 lockdep_is_held(&module_mutex)) {
325                 struct symsearch arr[] = {
326                         { mod->syms, mod->syms + mod->num_syms, mod->crcs,
327                           NOT_GPL_ONLY },
328                         { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
329                           mod->gpl_crcs,
330                           GPL_ONLY },
331                 };
332 
333                 if (mod->state == MODULE_STATE_UNFORMED)
334                         continue;
335 
336                 for (i = 0; i < ARRAY_SIZE(arr); i++)
337                         if (find_exported_symbol_in_section(&arr[i], mod, fsa))
338                                 return true;
339         }
340 
341         pr_debug("Failed to find symbol %s\n", fsa->name);
342         return false;
343 }
344 
345 /*
346  * Search for module by name: must hold module_mutex (or preempt disabled
347  * for read-only access).
348  */
349 struct module *find_module_all(const char *name, size_t len,
350                                bool even_unformed)
351 {
352         struct module *mod;
353 
354         module_assert_mutex_or_preempt();
355 
356         list_for_each_entry_rcu(mod, &modules, list,
357                                 lockdep_is_held(&module_mutex)) {
358                 if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
359                         continue;
360                 if (strlen(mod->name) == len && !memcmp(mod->name, name, len))
361                         return mod;
362         }
363         return NULL;
364 }
365 
366 struct module *find_module(const char *name)
367 {
368         return find_module_all(name, strlen(name), false);
369 }
370 
371 #ifdef CONFIG_SMP
372 
373 static inline void __percpu *mod_percpu(struct module *mod)
374 {
375         return mod->percpu;
376 }
377 
378 static int percpu_modalloc(struct module *mod, struct load_info *info)
379 {
380         Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu];
381         unsigned long align = pcpusec->sh_addralign;
382 
383         if (!pcpusec->sh_size)
384                 return 0;
385 
386         if (align > PAGE_SIZE) {
387                 pr_warn("%s: per-cpu alignment %li > %li\n",
388                         mod->name, align, PAGE_SIZE);
389                 align = PAGE_SIZE;
390         }
391 
392         mod->percpu = __alloc_reserved_percpu(pcpusec->sh_size, align);
393         if (!mod->percpu) {
394                 pr_warn("%s: Could not allocate %lu bytes percpu data\n",
395                         mod->name, (unsigned long)pcpusec->sh_size);
396                 return -ENOMEM;
397         }
398         mod->percpu_size = pcpusec->sh_size;
399         return 0;
400 }
401 
402 static void percpu_modfree(struct module *mod)
403 {
404         free_percpu(mod->percpu);
405 }
406 
407 static unsigned int find_pcpusec(struct load_info *info)
408 {
409         return find_sec(info, ".data..percpu");
410 }
411 
412 static void percpu_modcopy(struct module *mod,
413                            const void *from, unsigned long size)
414 {
415         int cpu;
416 
417         for_each_possible_cpu(cpu)
418                 memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
419 }
420 
421 bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
422 {
423         struct module *mod;
424         unsigned int cpu;
425 
426         preempt_disable();
427 
428         list_for_each_entry_rcu(mod, &modules, list) {
429                 if (mod->state == MODULE_STATE_UNFORMED)
430                         continue;
431                 if (!mod->percpu_size)
432                         continue;
433                 for_each_possible_cpu(cpu) {
434                         void *start = per_cpu_ptr(mod->percpu, cpu);
435                         void *va = (void *)addr;
436 
437                         if (va >= start && va < start + mod->percpu_size) {
438                                 if (can_addr) {
439                                         *can_addr = (unsigned long) (va - start);
440                                         *can_addr += (unsigned long)
441                                                 per_cpu_ptr(mod->percpu,
442                                                             get_boot_cpu_id());
443                                 }
444                                 preempt_enable();
445                                 return true;
446                         }
447                 }
448         }
449 
450         preempt_enable();
451         return false;
452 }
453 
454 /**
455  * is_module_percpu_address() - test whether address is from module static percpu
456  * @addr: address to test
457  *
458  * Test whether @addr belongs to module static percpu area.
459  *
460  * Return: %true if @addr is from module static percpu area
461  */
462 bool is_module_percpu_address(unsigned long addr)
463 {
464         return __is_module_percpu_address(addr, NULL);
465 }
466 
467 #else /* ... !CONFIG_SMP */
468 
469 static inline void __percpu *mod_percpu(struct module *mod)
470 {
471         return NULL;
472 }
473 static int percpu_modalloc(struct module *mod, struct load_info *info)
474 {
475         /* UP modules shouldn't have this section: ENOMEM isn't quite right */
476         if (info->sechdrs[info->index.pcpu].sh_size != 0)
477                 return -ENOMEM;
478         return 0;
479 }
480 static inline void percpu_modfree(struct module *mod)
481 {
482 }
483 static unsigned int find_pcpusec(struct load_info *info)
484 {
485         return 0;
486 }
487 static inline void percpu_modcopy(struct module *mod,
488                                   const void *from, unsigned long size)
489 {
490         /* pcpusec should be 0, and size of that section should be 0. */
491         BUG_ON(size != 0);
492 }
493 bool is_module_percpu_address(unsigned long addr)
494 {
495         return false;
496 }
497 
498 bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
499 {
500         return false;
501 }
502 
503 #endif /* CONFIG_SMP */
504 
505 #define MODINFO_ATTR(field)     \
506 static void setup_modinfo_##field(struct module *mod, const char *s)  \
507 {                                                                     \
508         mod->field = kstrdup(s, GFP_KERNEL);                          \
509 }                                                                     \
510 static ssize_t show_modinfo_##field(struct module_attribute *mattr,   \
511                         struct module_kobject *mk, char *buffer)      \
512 {                                                                     \
513         return scnprintf(buffer, PAGE_SIZE, "%s\n", mk->mod->field);  \
514 }                                                                     \
515 static int modinfo_##field##_exists(struct module *mod)               \
516 {                                                                     \
517         return mod->field != NULL;                                    \
518 }                                                                     \
519 static void free_modinfo_##field(struct module *mod)                  \
520 {                                                                     \
521         kfree(mod->field);                                            \
522         mod->field = NULL;                                            \
523 }                                                                     \
524 static struct module_attribute modinfo_##field = {                    \
525         .attr = { .name = __stringify(field), .mode = 0444 },         \
526         .show = show_modinfo_##field,                                 \
527         .setup = setup_modinfo_##field,                               \
528         .test = modinfo_##field##_exists,                             \
529         .free = free_modinfo_##field,                                 \
530 };
531 
532 MODINFO_ATTR(version);
533 MODINFO_ATTR(srcversion);
534 
535 static struct {
536         char name[MODULE_NAME_LEN + 1];
537         char taints[MODULE_FLAGS_BUF_SIZE];
538 } last_unloaded_module;
539 
540 #ifdef CONFIG_MODULE_UNLOAD
541 
542 EXPORT_TRACEPOINT_SYMBOL(module_get);
543 
 544 /* MODULE_REF_BASE is the base reference count held by the module loader. */
545 #define MODULE_REF_BASE 1
546 
547 /* Init the unload section of the module. */
548 static int module_unload_init(struct module *mod)
549 {
550         /*
551          * Initialize reference counter to MODULE_REF_BASE.
552          * refcnt == 0 means module is going.
553          */
554         atomic_set(&mod->refcnt, MODULE_REF_BASE);
555 
556         INIT_LIST_HEAD(&mod->source_list);
557         INIT_LIST_HEAD(&mod->target_list);
558 
559         /* Hold reference count during initialization. */
560         atomic_inc(&mod->refcnt);
561 
562         return 0;
563 }
564 
565 /* Does a already use b? */
566 static int already_uses(struct module *a, struct module *b)
567 {
568         struct module_use *use;
569 
570         list_for_each_entry(use, &b->source_list, source_list) {
571                 if (use->source == a)
572                         return 1;
573         }
574         pr_debug("%s does not use %s!\n", a->name, b->name);
575         return 0;
576 }
577 
578 /*
579  * Module a uses b
580  *  - we add 'a' as a "source", 'b' as a "target" of module use
581  *  - the module_use is added to the list of 'b' sources (so
582  *    'b' can walk the list to see who sourced them), and of 'a'
583  *    targets (so 'a' can see what modules it targets).
584  */
585 static int add_module_usage(struct module *a, struct module *b)
586 {
587         struct module_use *use;
588 
589         pr_debug("Allocating new usage for %s.\n", a->name);
590         use = kmalloc(sizeof(*use), GFP_ATOMIC);
591         if (!use)
592                 return -ENOMEM;
593 
594         use->source = a;
595         use->target = b;
596         list_add(&use->source_list, &b->source_list);
597         list_add(&use->target_list, &a->target_list);
598         return 0;
599 }
600 
 601 /* Module a uses b: caller must hold module_mutex. */
602 static int ref_module(struct module *a, struct module *b)
603 {
604         int err;
605 
606         if (b == NULL || already_uses(a, b))
607                 return 0;
608 
609         /* If module isn't available, we fail. */
610         err = strong_try_module_get(b);
611         if (err)
612                 return err;
613 
614         err = add_module_usage(a, b);
615         if (err) {
616                 module_put(b);
617                 return err;
618         }
619         return 0;
620 }
621 
622 /* Clear the unload stuff of the module. */
623 static void module_unload_free(struct module *mod)
624 {
625         struct module_use *use, *tmp;
626 
627         mutex_lock(&module_mutex);
628         list_for_each_entry_safe(use, tmp, &mod->target_list, target_list) {
629                 struct module *i = use->target;
630                 pr_debug("%s unusing %s\n", mod->name, i->name);
631                 module_put(i);
632                 list_del(&use->source_list);
633                 list_del(&use->target_list);
634                 kfree(use);
635         }
636         mutex_unlock(&module_mutex);
637 }
638 
639 #ifdef CONFIG_MODULE_FORCE_UNLOAD
640 static inline int try_force_unload(unsigned int flags)
641 {
642         int ret = (flags & O_TRUNC);
643         if (ret)
644                 add_taint(TAINT_FORCED_RMMOD, LOCKDEP_NOW_UNRELIABLE);
645         return ret;
646 }
647 #else
648 static inline int try_force_unload(unsigned int flags)
649 {
650         return 0;
651 }
652 #endif /* CONFIG_MODULE_FORCE_UNLOAD */
653 
654 /* Try to release refcount of module, 0 means success. */
655 static int try_release_module_ref(struct module *mod)
656 {
657         int ret;
658 
659         /* Try to decrement refcnt which we set at loading */
660         ret = atomic_sub_return(MODULE_REF_BASE, &mod->refcnt);
661         BUG_ON(ret < 0);
662         if (ret)
663                 /* Someone can put this right now, recover with checking */
664                 ret = atomic_add_unless(&mod->refcnt, MODULE_REF_BASE, 0);
665 
666         return ret;
667 }
668 
669 static int try_stop_module(struct module *mod, int flags, int *forced)
670 {
671         /* If it's not unused, quit unless we're forcing. */
672         if (try_release_module_ref(mod) != 0) {
673                 *forced = try_force_unload(flags);
674                 if (!(*forced))
675                         return -EWOULDBLOCK;
676         }
677 
678         /* Mark it as dying. */
679         mod->state = MODULE_STATE_GOING;
680 
681         return 0;
682 }
683 
684 /**
685  * module_refcount() - return the refcount or -1 if unloading
686  * @mod:        the module we're checking
687  *
688  * Return:
689  *      -1 if the module is in the process of unloading
690  *      otherwise the number of references in the kernel to the module
691  */
692 int module_refcount(struct module *mod)
693 {
694         return atomic_read(&mod->refcnt) - MODULE_REF_BASE;
695 }
696 EXPORT_SYMBOL(module_refcount);
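/*
 * Editor's sketch: since the loader's own MODULE_REF_BASE is subtracted,
 * module_refcount() reports only the external references, and drops to -1
 * once try_release_module_ref() has claimed the base reference on unload.
 * A module asking whether it is currently pinned (hypothetical helper):
 */
static bool example_am_i_pinned(void)
{
        return module_refcount(THIS_MODULE) > 0;
}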
697 
698 /* This exists whether we can unload or not */
699 static void free_module(struct module *mod);
700 
701 SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
702                 unsigned int, flags)
703 {
704         struct module *mod;
705         char name[MODULE_NAME_LEN];
706         char buf[MODULE_FLAGS_BUF_SIZE];
707         int ret, forced = 0;
708 
709         if (!capable(CAP_SYS_MODULE) || modules_disabled)
710                 return -EPERM;
711         if (!ccs_capable(CCS_USE_KERNEL_MODULE))
712                 return -EPERM;
713 
714         if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0)
715                 return -EFAULT;
716         name[MODULE_NAME_LEN-1] = '\0';
717 
718         audit_log_kern_module(name);
719 
720         if (mutex_lock_interruptible(&module_mutex) != 0)
721                 return -EINTR;
722 
723         mod = find_module(name);
724         if (!mod) {
725                 ret = -ENOENT;
726                 goto out;
727         }
728 
729         if (!list_empty(&mod->source_list)) {
730                 /* Other modules depend on us: get rid of them first. */
731                 ret = -EWOULDBLOCK;
732                 goto out;
733         }
734 
735         /* Doing init or already dying? */
736         if (mod->state != MODULE_STATE_LIVE) {
737                 /* FIXME: if (force), slam module count damn the torpedoes */
738                 pr_debug("%s already dying\n", mod->name);
739                 ret = -EBUSY;
740                 goto out;
741         }
742 
743         /* If it has an init func, it must have an exit func to unload */
744         if (mod->init && !mod->exit) {
745                 forced = try_force_unload(flags);
746                 if (!forced) {
747                         /* This module can't be removed */
748                         ret = -EBUSY;
749                         goto out;
750                 }
751         }
752 
753         ret = try_stop_module(mod, flags, &forced);
754         if (ret != 0)
755                 goto out;
756 
757         mutex_unlock(&module_mutex);
758         /* Final destruction now no one is using it. */
759         if (mod->exit != NULL)
760                 mod->exit();
761         blocking_notifier_call_chain(&module_notify_list,
762                                      MODULE_STATE_GOING, mod);
763         klp_module_going(mod);
764         ftrace_release_mod(mod);
765 
766         async_synchronize_full();
767 
768         /* Store the name and taints of the last unloaded module for diagnostic purposes */
769         strscpy(last_unloaded_module.name, mod->name, sizeof(last_unloaded_module.name));
770         strscpy(last_unloaded_module.taints, module_flags(mod, buf, false), sizeof(last_unloaded_module.taints));
771 
772         free_module(mod);
773         /* someone could wait for the module in add_unformed_module() */
774         wake_up_all(&module_wq);
775         return 0;
776 out:
777         mutex_unlock(&module_mutex);
778         return ret;
779 }
780 
781 void __symbol_put(const char *symbol)
782 {
783         struct find_symbol_arg fsa = {
784                 .name   = symbol,
785                 .gplok  = true,
786         };
787 
788         preempt_disable();
789         BUG_ON(!find_symbol(&fsa));
790         module_put(fsa.owner);
791         preempt_enable();
792 }
793 EXPORT_SYMBOL(__symbol_put);
794 
795 /* Note this assumes addr is a function, which it currently always is. */
796 void symbol_put_addr(void *addr)
797 {
798         struct module *modaddr;
799         unsigned long a = (unsigned long)dereference_function_descriptor(addr);
800 
801         if (core_kernel_text(a))
802                 return;
803 
804         /*
805          * Even though we hold a reference on the module; we still need to
806          * disable preemption in order to safely traverse the data structure.
807          */
808         preempt_disable();
809         modaddr = __module_text_address(a);
810         BUG_ON(!modaddr);
811         module_put(modaddr);
812         preempt_enable();
813 }
814 EXPORT_SYMBOL_GPL(symbol_put_addr);
815 
816 static ssize_t show_refcnt(struct module_attribute *mattr,
817                            struct module_kobject *mk, char *buffer)
818 {
819         return sprintf(buffer, "%i\n", module_refcount(mk->mod));
820 }
821 
822 static struct module_attribute modinfo_refcnt =
823         __ATTR(refcnt, 0444, show_refcnt, NULL);
824 
825 void __module_get(struct module *module)
826 {
827         if (module) {
828                 atomic_inc(&module->refcnt);
829                 trace_module_get(module, _RET_IP_);
830         }
831 }
832 EXPORT_SYMBOL(__module_get);
833 
834 bool try_module_get(struct module *module)
835 {
836         bool ret = true;
837 
838         if (module) {
839                 /* Note: here, we can fail to get a reference */
840                 if (likely(module_is_live(module) &&
841                            atomic_inc_not_zero(&module->refcnt) != 0))
842                         trace_module_get(module, _RET_IP_);
843                 else
844                         ret = false;
845         }
846         return ret;
847 }
848 EXPORT_SYMBOL(try_module_get);
849 
850 void module_put(struct module *module)
851 {
852         int ret;
853 
854         if (module) {
855                 ret = atomic_dec_if_positive(&module->refcnt);
856                 WARN_ON(ret < 0);       /* Failed to put refcount */
857                 trace_module_put(module, _RET_IP_);
858         }
859 }
860 EXPORT_SYMBOL(module_put);
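/*
 * Editor's sketch of the canonical pairing for the helpers above: pin the
 * providing module before calling into it, drop the pin afterwards. The
 * ops structure and names are hypothetical.
 */
struct example_ops {
        struct module *owner;
        int (*do_something)(void);
};

static int example_call_backend(const struct example_ops *ops)
{
        int ret;

        if (!try_module_get(ops->owner))        /* owner may be unloading */
                return -ENODEV;
        ret = ops->do_something();
        module_put(ops->owner);
        return ret;
}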
861 
862 #else /* !CONFIG_MODULE_UNLOAD */
863 static inline void module_unload_free(struct module *mod)
864 {
865 }
866 
867 static int ref_module(struct module *a, struct module *b)
868 {
869         return strong_try_module_get(b);
870 }
871 
872 static inline int module_unload_init(struct module *mod)
873 {
874         return 0;
875 }
876 #endif /* CONFIG_MODULE_UNLOAD */
877 
878 size_t module_flags_taint(unsigned long taints, char *buf)
879 {
880         size_t l = 0;
881         int i;
882 
883         for (i = 0; i < TAINT_FLAGS_COUNT; i++) {
884                 if (taint_flags[i].module && test_bit(i, &taints))
885                         buf[l++] = taint_flags[i].c_true;
886         }
887 
888         return l;
889 }
890 
891 static ssize_t show_initstate(struct module_attribute *mattr,
892                               struct module_kobject *mk, char *buffer)
893 {
894         const char *state = "unknown";
895 
896         switch (mk->mod->state) {
897         case MODULE_STATE_LIVE:
898                 state = "live";
899                 break;
900         case MODULE_STATE_COMING:
901                 state = "coming";
902                 break;
903         case MODULE_STATE_GOING:
904                 state = "going";
905                 break;
906         default:
907                 BUG();
908         }
909         return sprintf(buffer, "%s\n", state);
910 }
911 
912 static struct module_attribute modinfo_initstate =
913         __ATTR(initstate, 0444, show_initstate, NULL);
914 
915 static ssize_t store_uevent(struct module_attribute *mattr,
916                             struct module_kobject *mk,
917                             const char *buffer, size_t count)
918 {
919         int rc;
920 
921         rc = kobject_synth_uevent(&mk->kobj, buffer, count);
922         return rc ? rc : count;
923 }
924 
925 struct module_attribute module_uevent =
926         __ATTR(uevent, 0200, NULL, store_uevent);
927 
928 static ssize_t show_coresize(struct module_attribute *mattr,
929                              struct module_kobject *mk, char *buffer)
930 {
931         unsigned int size = mk->mod->mem[MOD_TEXT].size;
932 
933         if (!IS_ENABLED(CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC)) {
934                 for_class_mod_mem_type(type, core_data)
935                         size += mk->mod->mem[type].size;
936         }
937         return sprintf(buffer, "%u\n", size);
938 }
939 
940 static struct module_attribute modinfo_coresize =
941         __ATTR(coresize, 0444, show_coresize, NULL);
942 
943 #ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
944 static ssize_t show_datasize(struct module_attribute *mattr,
945                              struct module_kobject *mk, char *buffer)
946 {
947         unsigned int size = 0;
948 
949         for_class_mod_mem_type(type, core_data)
950                 size += mk->mod->mem[type].size;
951         return sprintf(buffer, "%u\n", size);
952 }
953 
954 static struct module_attribute modinfo_datasize =
955         __ATTR(datasize, 0444, show_datasize, NULL);
956 #endif
957 
958 static ssize_t show_initsize(struct module_attribute *mattr,
959                              struct module_kobject *mk, char *buffer)
960 {
961         unsigned int size = 0;
962 
963         for_class_mod_mem_type(type, init)
964                 size += mk->mod->mem[type].size;
965         return sprintf(buffer, "%u\n", size);
966 }
967 
968 static struct module_attribute modinfo_initsize =
969         __ATTR(initsize, 0444, show_initsize, NULL);
970 
971 static ssize_t show_taint(struct module_attribute *mattr,
972                           struct module_kobject *mk, char *buffer)
973 {
974         size_t l;
975 
976         l = module_flags_taint(mk->mod->taints, buffer);
977         buffer[l++] = '\n';
978         return l;
979 }
980 
981 static struct module_attribute modinfo_taint =
982         __ATTR(taint, 0444, show_taint, NULL);
983 
984 struct module_attribute *modinfo_attrs[] = {
985         &module_uevent,
986         &modinfo_version,
987         &modinfo_srcversion,
988         &modinfo_initstate,
989         &modinfo_coresize,
990 #ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
991         &modinfo_datasize,
992 #endif
993         &modinfo_initsize,
994         &modinfo_taint,
995 #ifdef CONFIG_MODULE_UNLOAD
996         &modinfo_refcnt,
997 #endif
998         NULL,
999 };
1000 
1001 size_t modinfo_attrs_count = ARRAY_SIZE(modinfo_attrs);
1002 
1003 static const char vermagic[] = VERMAGIC_STRING;
1004 
1005 int try_to_force_load(struct module *mod, const char *reason)
1006 {
1007 #ifdef CONFIG_MODULE_FORCE_LOAD
1008         if (!test_taint(TAINT_FORCED_MODULE))
1009                 pr_warn("%s: %s: kernel tainted.\n", mod->name, reason);
1010         add_taint_module(mod, TAINT_FORCED_MODULE, LOCKDEP_NOW_UNRELIABLE);
1011         return 0;
1012 #else
1013         return -ENOEXEC;
1014 #endif
1015 }
1016 
1017 /* Parse tag=value strings from .modinfo section */
1018 char *module_next_tag_pair(char *string, unsigned long *secsize)
1019 {
1020         /* Skip non-zero chars */
1021         while (string[0]) {
1022                 string++;
1023                 if ((*secsize)-- <= 1)
1024                         return NULL;
1025         }
1026 
1027         /* Skip any zero padding. */
1028         while (!string[0]) {
1029                 string++;
1030                 if ((*secsize)-- <= 1)
1031                         return NULL;
1032         }
1033         return string;
1034 }
1035 
1036 static char *get_next_modinfo(const struct load_info *info, const char *tag,
1037                               char *prev)
1038 {
1039         char *p;
1040         unsigned int taglen = strlen(tag);
1041         Elf_Shdr *infosec = &info->sechdrs[info->index.info];
1042         unsigned long size = infosec->sh_size;
1043 
1044         /*
1045          * get_modinfo() calls made before rewrite_section_headers()
1046          * must use sh_offset, as sh_addr isn't set!
1047          */
1048         char *modinfo = (char *)info->hdr + infosec->sh_offset;
1049 
1050         if (prev) {
1051                 size -= prev - modinfo;
1052                 modinfo = module_next_tag_pair(prev, &size);
1053         }
1054 
1055         for (p = modinfo; p; p = module_next_tag_pair(p, &size)) {
1056                 if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=')
1057                         return p + taglen + 1;
1058         }
1059         return NULL;
1060 }
1061 
1062 static char *get_modinfo(const struct load_info *info, const char *tag)
1063 {
1064         return get_next_modinfo(info, tag, NULL);
1065 }
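/*
 * Editor's note on the format parsed above: .modinfo is a sequence of
 * NUL-terminated "tag=value" strings, possibly separated by zero padding,
 * conceptually:
 *
 *      license=GPL\0author=Jane Doe\0\0vermagic=6.11.5 SMP ...\0
 *
 * so get_modinfo(info, "license") returns a pointer to "GPL", and
 * get_next_modinfo() resumes after a previous hit, which is how repeated
 * tags such as "import_ns" are iterated in verify_namespace_is_imported().
 */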
1066 
1067 static int verify_namespace_is_imported(const struct load_info *info,
1068                                         const struct kernel_symbol *sym,
1069                                         struct module *mod)
1070 {
1071         const char *namespace;
1072         char *imported_namespace;
1073 
1074         namespace = kernel_symbol_namespace(sym);
1075         if (namespace && namespace[0]) {
1076                 for_each_modinfo_entry(imported_namespace, info, "import_ns") {
1077                         if (strcmp(namespace, imported_namespace) == 0)
1078                                 return 0;
1079                 }
1080 #ifdef CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS
1081                 pr_warn(
1082 #else
1083                 pr_err(
1084 #endif
1085                         "%s: module uses symbol (%s) from namespace %s, but does not import it.\n",
1086                         mod->name, kernel_symbol_name(sym), namespace);
1087 #ifndef CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS
1088                 return -EINVAL;
1089 #endif
1090         }
1091         return 0;
1092 }
1093 
1094 static bool inherit_taint(struct module *mod, struct module *owner, const char *name)
1095 {
1096         if (!owner || !test_bit(TAINT_PROPRIETARY_MODULE, &owner->taints))
1097                 return true;
1098 
1099         if (mod->using_gplonly_symbols) {
1100                 pr_err("%s: module using GPL-only symbols uses symbols %s from proprietary module %s.\n",
1101                         mod->name, name, owner->name);
1102                 return false;
1103         }
1104 
1105         if (!test_bit(TAINT_PROPRIETARY_MODULE, &mod->taints)) {
1106                 pr_warn("%s: module uses symbols %s from proprietary module %s, inheriting taint.\n",
1107                         mod->name, name, owner->name);
1108                 set_bit(TAINT_PROPRIETARY_MODULE, &mod->taints);
1109         }
1110         return true;
1111 }
1112 
1113 /* Resolve a symbol for this module.  I.e. if we find one, record usage. */
1114 static const struct kernel_symbol *resolve_symbol(struct module *mod,
1115                                                   const struct load_info *info,
1116                                                   const char *name,
1117                                                   char ownername[])
1118 {
1119         struct find_symbol_arg fsa = {
1120                 .name   = name,
1121                 .gplok  = !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)),
1122                 .warn   = true,
1123         };
1124         int err;
1125 
1126         /*
1127          * The module_mutex should not be a heavily contended lock;
1128          * if we get the occasional sleep here, we'll go an extra iteration
1129          * in the wait_event_interruptible(), which is harmless.
1130          */
1131         sched_annotate_sleep();
1132         mutex_lock(&module_mutex);
1133         if (!find_symbol(&fsa))
1134                 goto unlock;
1135 
1136         if (fsa.license == GPL_ONLY)
1137                 mod->using_gplonly_symbols = true;
1138 
1139         if (!inherit_taint(mod, fsa.owner, name)) {
1140                 fsa.sym = NULL;
1141                 goto getname;
1142         }
1143 
1144         if (!check_version(info, name, mod, fsa.crc)) {
1145                 fsa.sym = ERR_PTR(-EINVAL);
1146                 goto getname;
1147         }
1148 
1149         err = verify_namespace_is_imported(info, fsa.sym, mod);
1150         if (err) {
1151                 fsa.sym = ERR_PTR(err);
1152                 goto getname;
1153         }
1154 
1155         err = ref_module(mod, fsa.owner);
1156         if (err) {
1157                 fsa.sym = ERR_PTR(err);
1158                 goto getname;
1159         }
1160 
1161 getname:
1162         /* We must make a copy under the lock if we failed to get a ref. */
1163         strncpy(ownername, module_name(fsa.owner), MODULE_NAME_LEN);
1164 unlock:
1165         mutex_unlock(&module_mutex);
1166         return fsa.sym;
1167 }
1168 
1169 static const struct kernel_symbol *
1170 resolve_symbol_wait(struct module *mod,
1171                     const struct load_info *info,
1172                     const char *name)
1173 {
1174         const struct kernel_symbol *ksym;
1175         char owner[MODULE_NAME_LEN];
1176 
1177         if (wait_event_interruptible_timeout(module_wq,
1178                         !IS_ERR(ksym = resolve_symbol(mod, info, name, owner))
1179                         || PTR_ERR(ksym) != -EBUSY,
1180                                              30 * HZ) <= 0) {
1181                 pr_warn("%s: gave up waiting for init of module %s.\n",
1182                         mod->name, owner);
1183         }
1184         return ksym;
1185 }
1186 
1187 void __weak module_arch_cleanup(struct module *mod)
1188 {
1189 }
1190 
1191 void __weak module_arch_freeing_init(struct module *mod)
1192 {
1193 }
1194 
1195 static int module_memory_alloc(struct module *mod, enum mod_mem_type type)
1196 {
1197         unsigned int size = PAGE_ALIGN(mod->mem[type].size);
1198         enum execmem_type execmem_type;
1199         void *ptr;
1200 
1201         mod->mem[type].size = size;
1202 
1203         if (mod_mem_type_is_data(type))
1204                 execmem_type = EXECMEM_MODULE_DATA;
1205         else
1206                 execmem_type = EXECMEM_MODULE_TEXT;
1207 
1208         ptr = execmem_alloc(execmem_type, size);
1209         if (!ptr)
1210                 return -ENOMEM;
1211 
1212          * The pointers to these blocks of memory are stored on the module
1213          * structure and kept around for as long as the module is. We only
1214          * free that memory when we unload the module.
1215          * around. We only free that memory when we unload the module.
1216          * Just mark them as not being a leak then. The .init* ELF
1217          * sections *do* get freed after boot so we *could* treat them
1218          * slightly differently with kmemleak_ignore() and only grey
1219          * them out as they work as typical memory allocations which
1220          * *do* eventually get freed, but let's just keep things simple
1221          * and avoid *any* false positives.
1222          */
1223         kmemleak_not_leak(ptr);
1224 
1225         memset(ptr, 0, size);
1226         mod->mem[type].base = ptr;
1227 
1228         return 0;
1229 }
1230 
1231 static void module_memory_free(struct module *mod, enum mod_mem_type type,
1232                                bool unload_codetags)
1233 {
1234         void *ptr = mod->mem[type].base;
1235 
1236         if (!unload_codetags && mod_mem_type_is_core_data(type))
1237                 return;
1238 
1239         execmem_free(ptr);
1240 }
1241 
1242 static void free_mod_mem(struct module *mod, bool unload_codetags)
1243 {
1244         for_each_mod_mem_type(type) {
1245                 struct module_memory *mod_mem = &mod->mem[type];
1246 
1247                 if (type == MOD_DATA)
1248                         continue;
1249 
1250                 /* Free lock-classes; relies on the preceding sync_rcu(). */
1251                 lockdep_free_key_range(mod_mem->base, mod_mem->size);
1252                 if (mod_mem->size)
1253                         module_memory_free(mod, type, unload_codetags);
1254         }
1255 
1256         /* MOD_DATA hosts mod, so free it at last */
1257         lockdep_free_key_range(mod->mem[MOD_DATA].base, mod->mem[MOD_DATA].size);
1258         module_memory_free(mod, MOD_DATA, unload_codetags);
1259 }
1260 
1261 /* Free a module, remove from lists, etc. */
1262 static void free_module(struct module *mod)
1263 {
1264         bool unload_codetags;
1265 
1266         trace_module_free(mod);
1267 
1268         unload_codetags = codetag_unload_module(mod);
1269         if (!unload_codetags)
1270                 pr_warn("%s: memory allocation(s) from the module still alive, cannot unload cleanly\n",
1271                         mod->name);
1272 
1273         mod_sysfs_teardown(mod);
1274 
1275         /*
1276          * We leave it in the list to prevent duplicate loads, but make sure
1277          * that no one uses it while it's being deconstructed.
1278          */
1279         mutex_lock(&module_mutex);
1280         mod->state = MODULE_STATE_UNFORMED;
1281         mutex_unlock(&module_mutex);
1282 
1283         /* Arch-specific cleanup. */
1284         module_arch_cleanup(mod);
1285 
1286         /* Module unload stuff */
1287         module_unload_free(mod);
1288 
1289         /* Free any allocated parameters. */
1290         destroy_params(mod->kp, mod->num_kp);
1291 
1292         if (is_livepatch_module(mod))
1293                 free_module_elf(mod);
1294 
1295         /* Now we can delete it from the lists */
1296         mutex_lock(&module_mutex);
1297         /* Unlink carefully: kallsyms could be walking list. */
1298         list_del_rcu(&mod->list);
1299         mod_tree_remove(mod);
1300         /* Remove this module from bug list, this uses list_del_rcu */
1301         module_bug_cleanup(mod);
1302         /* Wait for RCU-sched synchronizing before releasing mod->list and buglist. */
1303         synchronize_rcu();
1304         if (try_add_tainted_module(mod))
1305                 pr_err("%s: adding tainted module to the unloaded tainted modules list failed.\n",
1306                        mod->name);
1307         mutex_unlock(&module_mutex);
1308 
1309         /* This may be empty, but that's OK */
1310         module_arch_freeing_init(mod);
1311         kfree(mod->args);
1312         percpu_modfree(mod);
1313 
1314         free_mod_mem(mod, unload_codetags);
1315 }
1316 
1317 void *__symbol_get(const char *symbol)
1318 {
1319         struct find_symbol_arg fsa = {
1320                 .name   = symbol,
1321                 .gplok  = true,
1322                 .warn   = true,
1323         };
1324 
1325         preempt_disable();
1326         if (!find_symbol(&fsa))
1327                 goto fail;
1328         if (fsa.license != GPL_ONLY) {
1329                 pr_warn("failing symbol_get of non-GPLONLY symbol %s.\n",
1330                         symbol);
1331                 goto fail;
1332         }
1333         if (strong_try_module_get(fsa.owner))
1334                 goto fail;
1335         preempt_enable();
1336         return (void *)kernel_symbol_value(fsa.sym);
1337 fail:
1338         preempt_enable();
1339         return NULL;
1340 }
1341 EXPORT_SYMBOL_GPL(__symbol_get);
1342 
1343 /*
1344  * Ensure that an exported symbol [global namespace] does not already exist
1345  * in the kernel or in some other module's exported symbol table.
1346  *
1347  * You must hold the module_mutex.
1348  */
1349 static int verify_exported_symbols(struct module *mod)
1350 {
1351         unsigned int i;
1352         const struct kernel_symbol *s;
1353         struct {
1354                 const struct kernel_symbol *sym;
1355                 unsigned int num;
1356         } arr[] = {
1357                 { mod->syms, mod->num_syms },
1358                 { mod->gpl_syms, mod->num_gpl_syms },
1359         };
1360 
1361         for (i = 0; i < ARRAY_SIZE(arr); i++) {
1362                 for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) {
1363                         struct find_symbol_arg fsa = {
1364                                 .name   = kernel_symbol_name(s),
1365                                 .gplok  = true,
1366                         };
1367                         if (find_symbol(&fsa)) {
1368                                 pr_err("%s: exports duplicate symbol %s"
1369                                        " (owned by %s)\n",
1370                                        mod->name, kernel_symbol_name(s),
1371                                        module_name(fsa.owner));
1372                                 return -ENOEXEC;
1373                         }
1374                 }
1375         }
1376         return 0;
1377 }
1378 
1379 static bool ignore_undef_symbol(Elf_Half emachine, const char *name)
1380 {
1381         /*
1382          * On x86, PIC code and Clang non-PIC code may have call foo@PLT. GNU as
1383          * before 2.37 produces an unreferenced _GLOBAL_OFFSET_TABLE_ on x86-64.
1384          * i386 has a similar problem but may not deserve a fix.
1385          *
1386          * If we ever have to ignore many symbols, consider refactoring the code to
1387          * only warn if referenced by a relocation.
1388          */
1389         if (emachine == EM_386 || emachine == EM_X86_64)
1390                 return !strcmp(name, "_GLOBAL_OFFSET_TABLE_");
1391         return false;
1392 }
1393 
1394 /* Change all symbols so that st_value encodes the pointer directly. */
1395 static int simplify_symbols(struct module *mod, const struct load_info *info)
1396 {
1397         Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
1398         Elf_Sym *sym = (void *)symsec->sh_addr;
1399         unsigned long secbase;
1400         unsigned int i;
1401         int ret = 0;
1402         const struct kernel_symbol *ksym;
1403 
1404         for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
1405                 const char *name = info->strtab + sym[i].st_name;
1406 
1407                 switch (sym[i].st_shndx) {
1408                 case SHN_COMMON:
1409                         /* Ignore common symbols */
1410                         if (!strncmp(name, "__gnu_lto", 9))
1411                                 break;
1412 
1413                         /*
1414                          * We compiled with -fno-common.  These are not
1415                          * supposed to happen.
1416                          */
1417                         pr_debug("Common symbol: %s\n", name);
1418                         pr_warn("%s: please compile with -fno-common\n",
1419                                mod->name);
1420                         ret = -ENOEXEC;
1421                         break;
1422 
1423                 case SHN_ABS:
1424                         /* Don't need to do anything */
1425                         pr_debug("Absolute symbol: 0x%08lx %s\n",
1426                                  (long)sym[i].st_value, name);
1427                         break;
1428 
1429                 case SHN_LIVEPATCH:
1430                         /* Livepatch symbols are resolved by livepatch */
1431                         break;
1432 
1433                 case SHN_UNDEF:
1434                         ksym = resolve_symbol_wait(mod, info, name);
1435                         /* Ok if resolved.  */
1436                         if (ksym && !IS_ERR(ksym)) {
1437                                 sym[i].st_value = kernel_symbol_value(ksym);
1438                                 break;
1439                         }
1440 
1441                         /* Ok if weak or ignored.  */
1442                         if (!ksym &&
1443                             (ELF_ST_BIND(sym[i].st_info) == STB_WEAK ||
1444                              ignore_undef_symbol(info->hdr->e_machine, name)))
1445                                 break;
1446 
1447                         ret = PTR_ERR(ksym) ?: -ENOENT;
1448                         pr_warn("%s: Unknown symbol %s (err %d)\n",
1449                                 mod->name, name, ret);
1450                         break;
1451 
1452                 default:
1453                         /* Divert to percpu allocation if a percpu var. */
1454                         if (sym[i].st_shndx == info->index.pcpu)
1455                                 secbase = (unsigned long)mod_percpu(mod);
1456                         else
1457                                 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
1458                         sym[i].st_value += secbase;
1459                         break;
1460                 }
1461         }
1462 
1463         return ret;
1464 }
1465 
1466 static int apply_relocations(struct module *mod, const struct load_info *info)
1467 {
1468         unsigned int i;
1469         int err = 0;
1470 
1471         /* Now do relocations. */
1472         for (i = 1; i < info->hdr->e_shnum; i++) {
1473                 unsigned int infosec = info->sechdrs[i].sh_info;
1474 
1475                 /* Not a valid relocation section? */
1476                 if (infosec >= info->hdr->e_shnum)
1477                         continue;
1478 
1479                 /* Don't bother with non-allocated sections */
1480                 if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC))
1481                         continue;
1482 
1483                 if (info->sechdrs[i].sh_flags & SHF_RELA_LIVEPATCH)
1484                         err = klp_apply_section_relocs(mod, info->sechdrs,
1485                                                        info->secstrings,
1486                                                        info->strtab,
1487                                                        info->index.sym, i,
1488                                                        NULL);
1489                 else if (info->sechdrs[i].sh_type == SHT_REL)
1490                         err = apply_relocate(info->sechdrs, info->strtab,
1491                                              info->index.sym, i, mod);
1492                 else if (info->sechdrs[i].sh_type == SHT_RELA)
1493                         err = apply_relocate_add(info->sechdrs, info->strtab,
1494                                                  info->index.sym, i, mod);
1495                 if (err < 0)
1496                         break;
1497         }
1498         return err;
1499 }
1500 
1501 /* Additional bytes needed by arch in front of individual sections */
1502 unsigned int __weak arch_mod_section_prepend(struct module *mod,
1503                                              unsigned int section)
1504 {
1505         /* default implementation just returns zero */
1506         return 0;
1507 }
1508 
1509 long module_get_offset_and_type(struct module *mod, enum mod_mem_type type,
1510                                 Elf_Shdr *sechdr, unsigned int section)
1511 {
1512         long offset;
1513         long mask = ((unsigned long)(type) & SH_ENTSIZE_TYPE_MASK) << SH_ENTSIZE_TYPE_SHIFT;
1514 
1515         mod->mem[type].size += arch_mod_section_prepend(mod, section);
1516         offset = ALIGN(mod->mem[type].size, sechdr->sh_addralign ?: 1);
1517         mod->mem[type].size = offset + sechdr->sh_size;
1518 
1519         WARN_ON_ONCE(offset & mask);
1520         return offset | mask;
1521 }
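/*
 * Editor's note: the value returned above and stored into sh_entsize packs
 * the mod_mem_type into the top bits and the section's offset within that
 * allocation into the low bits; conceptually the decode side is
 *
 *      type   = (sh_entsize >> SH_ENTSIZE_TYPE_SHIFT) & SH_ENTSIZE_TYPE_MASK;
 *      offset = sh_entsize & SH_ENTSIZE_OFFSET_MASK;
 *      addr   = mod->mem[type].base + offset;
 *
 * matching the masks used in module_get_offset_and_type() and the "high
 * bit means init" convention described before layout_sections() below.
 */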
1522 
1523 bool module_init_layout_section(const char *sname)
1524 {
1525 #ifndef CONFIG_MODULE_UNLOAD
1526         if (module_exit_section(sname))
1527                 return true;
1528 #endif
1529         return module_init_section(sname);
1530 }
1531 
1532 static void __layout_sections(struct module *mod, struct load_info *info, bool is_init)
1533 {
1534         unsigned int m, i;
1535 
1536         static const unsigned long masks[][2] = {
1537                 /*
1538                  * NOTE: all executable code must be the first section
1539                  * in this array; otherwise modify the text_size
1540                  * finder in the two loops below
1541                  */
1542                 { SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL },
1543                 { SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL },
1544                 { SHF_RO_AFTER_INIT | SHF_ALLOC, ARCH_SHF_SMALL },
1545                 { SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL },
1546                 { ARCH_SHF_SMALL | SHF_ALLOC, 0 }
1547         };
1548         static const int core_m_to_mem_type[] = {
1549                 MOD_TEXT,
1550                 MOD_RODATA,
1551                 MOD_RO_AFTER_INIT,
1552                 MOD_DATA,
1553                 MOD_DATA,
1554         };
1555         static const int init_m_to_mem_type[] = {
1556                 MOD_INIT_TEXT,
1557                 MOD_INIT_RODATA,
1558                 MOD_INVALID,
1559                 MOD_INIT_DATA,
1560                 MOD_INIT_DATA,
1561         };
1562 
1563         for (m = 0; m < ARRAY_SIZE(masks); ++m) {
1564                 enum mod_mem_type type = is_init ? init_m_to_mem_type[m] : core_m_to_mem_type[m];
1565 
1566                 for (i = 0; i < info->hdr->e_shnum; ++i) {
1567                         Elf_Shdr *s = &info->sechdrs[i];
1568                         const char *sname = info->secstrings + s->sh_name;
1569 
1570                         if ((s->sh_flags & masks[m][0]) != masks[m][0]
1571                             || (s->sh_flags & masks[m][1])
1572                             || s->sh_entsize != ~0UL
1573                             || is_init != module_init_layout_section(sname))
1574                                 continue;
1575 
1576                         if (WARN_ON_ONCE(type == MOD_INVALID))
1577                                 continue;
1578 
1579                         s->sh_entsize = module_get_offset_and_type(mod, type, s, i);
1580                         pr_debug("\t%s\n", sname);
1581                 }
1582         }
1583 }
1584 
1585 /*
1586  * Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
1587  * might -- code, read-only data, read-write data, small data.  Tally
1588  * sizes, and place the offsets into sh_entsize fields: high bit means it
1589  * belongs in init.
1590  */
1591 static void layout_sections(struct module *mod, struct load_info *info)
1592 {
1593         unsigned int i;
1594 
1595         for (i = 0; i < info->hdr->e_shnum; i++)
1596                 info->sechdrs[i].sh_entsize = ~0UL;
1597 
1598         pr_debug("Core section allocation order for %s:\n", mod->name);
1599         __layout_sections(mod, info, false);
1600 
1601         pr_debug("Init section allocation order for %s:\n", mod->name);
1602         __layout_sections(mod, info, true);
1603 }
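
/*
 * Worked example (editorial): with the masks above, a typical module's
 * sections are bucketed as follows on the core pass (is_init == false):
 *
 *	.text                 SHF_ALLOC|SHF_EXECINSTR     -> MOD_TEXT
 *	.rodata               SHF_ALLOC                   -> MOD_RODATA
 *	.data..ro_after_init  SHF_ALLOC|SHF_WRITE|
 *	                      SHF_RO_AFTER_INIT           -> MOD_RO_AFTER_INIT
 *	.data, .bss           SHF_ALLOC|SHF_WRITE         -> MOD_DATA
 *
 * A section is claimed by the first mask entry whose required bits
 * (masks[m][0]) are all set and whose forbidden bits (masks[m][1]) are
 * all clear; sh_entsize == ~0UL marks a section not yet claimed.
 */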
1604 
1605 static void module_license_taint_check(struct module *mod, const char *license)
1606 {
1607         if (!license)
1608                 license = "unspecified";
1609 
1610         if (!license_is_gpl_compatible(license)) {
1611                 if (!test_taint(TAINT_PROPRIETARY_MODULE))
1612                         pr_warn("%s: module license '%s' taints kernel.\n",
1613                                 mod->name, license);
1614                 add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
1615                                  LOCKDEP_NOW_UNRELIABLE);
1616         }
1617 }
1618 
1619 static void setup_modinfo(struct module *mod, struct load_info *info)
1620 {
1621         struct module_attribute *attr;
1622         int i;
1623 
1624         for (i = 0; (attr = modinfo_attrs[i]); i++) {
1625                 if (attr->setup)
1626                         attr->setup(mod, get_modinfo(info, attr->attr.name));
1627         }
1628 }
1629 
1630 static void free_modinfo(struct module *mod)
1631 {
1632         struct module_attribute *attr;
1633         int i;
1634 
1635         for (i = 0; (attr = modinfo_attrs[i]); i++) {
1636                 if (attr->free)
1637                         attr->free(mod);
1638         }
1639 }
1640 
1641 bool __weak module_init_section(const char *name)
1642 {
1643         return strstarts(name, ".init");
1644 }
1645 
1646 bool __weak module_exit_section(const char *name)
1647 {
1648         return strstarts(name, ".exit");
1649 }
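
/*
 * Editorial note: with the defaults above, ".init.text", ".init.data"
 * and friends land in the init layout. When CONFIG_MODULE_UNLOAD is
 * disabled, module_init_layout_section() also routes ".exit.*" sections
 * into the init layout: without unload support the exit code can never
 * run, so it may be discarded together with the init sections.
 */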
1650 
1651 static int validate_section_offset(struct load_info *info, Elf_Shdr *shdr)
1652 {
1653 #if defined(CONFIG_64BIT)
1654         unsigned long long secend;
1655 #else
1656         unsigned long secend;
1657 #endif
1658 
1659         /*
1660          * Check for both overflow and offset/size being
1661          * too large.
1662          */
1663         secend = shdr->sh_offset + shdr->sh_size;
1664         if (secend < shdr->sh_offset || secend > info->len)
1665                 return -ENOEXEC;
1666 
1667         return 0;
1668 }
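
/*
 * Worked example (editorial): on a 32-bit kernel, sh_offset = 0xfffff000
 * with sh_size = 0x2000 wraps around to secend = 0x1000, which is smaller
 * than sh_offset; the first half of the check above catches exactly this
 * overflow, while the second half rejects sections whose (non-wrapping)
 * end falls beyond the bytes actually read from userspace.
 */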
1669 
1670 /*
1671  * Check userspace passed ELF module against our expectations, and cache
1672  * useful variables for further processing as we go.
1673  *
1674  * This does basic validity checks against section offsets and sizes, the
1675  * section name string table, and the indices used for it (sh_name).
1676  *
1677  * Since we're already walking the ELF sections anyway, we also cache
1678  * useful variables which will be used later for our convenience:
1679  *
1680  *      o pointers to section headers
1681  *      o cache the modinfo symbol section
1682  *      o cache the string symbol section
1683  *      o cache the module section
1684  *
1685  * As a last step we set info->mod to the temporary copy of the module in
1686  * info->hdr. The final one will be allocated in move_module(). Any
1687  * modifications we make to our copy of the module will be carried over
1688  * to the final minted module.
1689  */
1690 static int elf_validity_cache_copy(struct load_info *info, int flags)
1691 {
1692         unsigned int i;
1693         Elf_Shdr *shdr, *strhdr;
1694         int err;
1695         unsigned int num_mod_secs = 0, mod_idx;
1696         unsigned int num_info_secs = 0, info_idx;
1697         unsigned int num_sym_secs = 0, sym_idx;
1698 
1699         if (info->len < sizeof(*(info->hdr))) {
1700                 pr_err("Invalid ELF header len %lu\n", info->len);
1701                 goto no_exec;
1702         }
1703 
1704         if (memcmp(info->hdr->e_ident, ELFMAG, SELFMAG) != 0) {
1705                 pr_err("Invalid ELF header magic: != %s\n", ELFMAG);
1706                 goto no_exec;
1707         }
1708         if (info->hdr->e_type != ET_REL) {
1709                 pr_err("Invalid ELF header type: %u != %u\n",
1710                        info->hdr->e_type, ET_REL);
1711                 goto no_exec;
1712         }
1713         if (!elf_check_arch(info->hdr)) {
1714                 pr_err("Invalid architecture in ELF header: %u\n",
1715                        info->hdr->e_machine);
1716                 goto no_exec;
1717         }
1718         if (!module_elf_check_arch(info->hdr)) {
1719                 pr_err("Invalid module architecture in ELF header: %u\n",
1720                        info->hdr->e_machine);
1721                 goto no_exec;
1722         }
1723         if (info->hdr->e_shentsize != sizeof(Elf_Shdr)) {
1724                 pr_err("Invalid ELF section header size\n");
1725                 goto no_exec;
1726         }
1727 
1728         /*
1729          * e_shnum is 16 bits, and sizeof(Elf_Shdr) is
1730          * known and small. So e_shnum * sizeof(Elf_Shdr)
1731          * will not overflow unsigned long on any platform.
1732          */
1733         if (info->hdr->e_shoff >= info->len
1734             || (info->hdr->e_shnum * sizeof(Elf_Shdr) >
1735                 info->len - info->hdr->e_shoff)) {
1736                 pr_err("Invalid ELF section header overflow\n");
1737                 goto no_exec;
1738         }
1739 
1740         info->sechdrs = (void *)info->hdr + info->hdr->e_shoff;
1741 
1742         /*
1743          * Verify if the section name table index is valid.
1744          */
1745         if (info->hdr->e_shstrndx == SHN_UNDEF
1746             || info->hdr->e_shstrndx >= info->hdr->e_shnum) {
1747                 pr_err("Invalid ELF section name index: %d || e_shstrndx (%d) >= e_shnum (%d)\n",
1748                        info->hdr->e_shstrndx, info->hdr->e_shstrndx,
1749                        info->hdr->e_shnum);
1750                 goto no_exec;
1751         }
1752 
1753         strhdr = &info->sechdrs[info->hdr->e_shstrndx];
1754         err = validate_section_offset(info, strhdr);
1755         if (err < 0) {
1756                 pr_err("Invalid ELF section hdr(type %u)\n", strhdr->sh_type);
1757                 return err;
1758         }
1759 
1760         /*
1761          * The section name table must be NUL-terminated, as required
1762          * by the spec. This makes strcmp and pr_* calls that access
1763          * strings in the section safe.
1764          */
1765         info->secstrings = (void *)info->hdr + strhdr->sh_offset;
1766         if (strhdr->sh_size == 0) {
1767                 pr_err("empty section name table\n");
1768                 goto no_exec;
1769         }
1770         if (info->secstrings[strhdr->sh_size - 1] != '\0') {
1771                 pr_err("ELF Spec violation: section name table isn't null terminated\n");
1772                 goto no_exec;
1773         }
1774 
1775         /*
1776          * The code assumes that section 0 has a length of zero and
1777          * an addr of zero, so check for it.
1778          */
1779         if (info->sechdrs[0].sh_type != SHT_NULL
1780             || info->sechdrs[0].sh_size != 0
1781             || info->sechdrs[0].sh_addr != 0) {
1782                 pr_err("ELF Spec violation: section 0 type(%d)!=SH_NULL or non-zero len or addr\n",
1783                        info->sechdrs[0].sh_type);
1784                 goto no_exec;
1785         }
1786 
1787         for (i = 1; i < info->hdr->e_shnum; i++) {
1788                 shdr = &info->sechdrs[i];
1789                 switch (shdr->sh_type) {
1790                 case SHT_NULL:
1791                 case SHT_NOBITS:
1792                         continue;
1793                 case SHT_SYMTAB:
1794                         if (shdr->sh_link == SHN_UNDEF
1795                             || shdr->sh_link >= info->hdr->e_shnum) {
1796                                 pr_err("Invalid ELF sh_link!=SHN_UNDEF(%d) or (sh_link(%d) >= hdr->e_shnum(%d)\n",
1797                                        shdr->sh_link, shdr->sh_link,
1798                                        info->hdr->e_shnum);
1799                                 goto no_exec;
1800                         }
1801                         num_sym_secs++;
1802                         sym_idx = i;
1803                         fallthrough;
1804                 default:
1805                         err = validate_section_offset(info, shdr);
1806                         if (err < 0) {
1807                                 pr_err("Invalid ELF section in module (section %u type %u)\n",
1808                                         i, shdr->sh_type);
1809                                 return err;
1810                         }
1811                         if (strcmp(info->secstrings + shdr->sh_name,
1812                                    ".gnu.linkonce.this_module") == 0) {
1813                                 num_mod_secs++;
1814                                 mod_idx = i;
1815                         } else if (strcmp(info->secstrings + shdr->sh_name,
1816                                    ".modinfo") == 0) {
1817                                 num_info_secs++;
1818                                 info_idx = i;
1819                         }
1820 
1821                         if (shdr->sh_flags & SHF_ALLOC) {
1822                                 if (shdr->sh_name >= strhdr->sh_size) {
1823                                         pr_err("Invalid ELF section name in module (section %u type %u)\n",
1824                                                i, shdr->sh_type);
1825                                         return -ENOEXEC;
1826                                 }
1827                         }
1828                         break;
1829                 }
1830         }
1831 
1832         if (num_info_secs > 1) {
1833                 pr_err("Only one .modinfo section must exist.\n");
1834                 goto no_exec;
1835         } else if (num_info_secs == 1) {
1836                 /* Try to find a name early so we can log errors with a module name */
1837                 info->index.info = info_idx;
1838                 info->name = get_modinfo(info, "name");
1839         }
1840 
1841         if (num_sym_secs != 1) {
1842                 pr_warn("%s: module has no symbols (stripped?)\n",
1843                         info->name ?: "(missing .modinfo section or name field)");
1844                 goto no_exec;
1845         }
1846 
1847         /* Sets internal symbols and strings. */
1848         info->index.sym = sym_idx;
1849         shdr = &info->sechdrs[sym_idx];
1850         info->index.str = shdr->sh_link;
1851         info->strtab = (char *)info->hdr + info->sechdrs[info->index.str].sh_offset;
1852 
1853         /*
1854          * The ".gnu.linkonce.this_module" ELF section is special. It is
1855          * what modpost uses to refer to __this_module, and we rely on it
1856          * for THIS_MODULE to point to &__this_module properly. The kernel's
1857          * modpost declares it in each module's *.mod.c file. If the kernel's
1858          * struct module changes, a full kernel rebuild is required.
1859          *
1860          * We have a few expectations for this special section, and the
1861          * following code validates all of them for us:
1862          *
1863          *   o Only one such section must exist
1864          *   o We expect the kernel to always have to allocate it: SHF_ALLOC
1865          *   o The section size must match the kernel's run-time struct module
1866          *     size
1867          */
1868         if (num_mod_secs != 1) {
1869                 pr_err("module %s: Only one .gnu.linkonce.this_module section must exist.\n",
1870                        info->name ?: "(missing .modinfo section or name field)");
1871                 goto no_exec;
1872         }
1873 
1874         shdr = &info->sechdrs[mod_idx];
1875 
1876         /*
1877          * This is already implied by the switch above; however, let's be
1878          * pedantic about it.
1879          */
1880         if (shdr->sh_type == SHT_NOBITS) {
1881                 pr_err("module %s: .gnu.linkonce.this_module section must have a size set\n",
1882                        info->name ?: "(missing .modinfo section or name field)");
1883                 goto no_exec;
1884         }
1885 
1886         if (!(shdr->sh_flags & SHF_ALLOC)) {
1887                 pr_err("module %s: .gnu.linkonce.this_module must occupy memory during process execution\n",
1888                        info->name ?: "(missing .modinfo section or name field)");
1889                 goto no_exec;
1890         }
1891 
1892         if (shdr->sh_size != sizeof(struct module)) {
1893                 pr_err("module %s: .gnu.linkonce.this_module section size must match the kernel's built struct module size at run time\n",
1894                        info->name ?: "(missing .modinfo section or name field)");
1895                 goto no_exec;
1896         }
1897 
1898         info->index.mod = mod_idx;
1899 
1900         /* This is temporary: point mod into copy of data. */
1901         info->mod = (void *)info->hdr + shdr->sh_offset;
1902 
1903         /*
1904          * If we didn't load the .modinfo 'name' field earlier, fall back to
1905          * the on-disk struct module's 'name' field.
1906          */
1907         if (!info->name)
1908                 info->name = info->mod->name;
1909 
1910         if (flags & MODULE_INIT_IGNORE_MODVERSIONS)
1911                 info->index.vers = 0; /* Pretend no __versions section! */
1912         else
1913                 info->index.vers = find_sec(info, "__versions");
1914 
1915         info->index.pcpu = find_pcpusec(info);
1916 
1917         return 0;
1918 
1919 no_exec:
1920         return -ENOEXEC;
1921 }
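
/*
 * Editorial note: when one of the pr_err() checks above fires, the same
 * header fields can be inspected on the module object with standard
 * binutils:
 *
 *	readelf -h foo.ko	e_type (must be ET_REL), e_machine,
 *				e_shnum, e_shstrndx
 *	readelf -S foo.ko	section headers: .modinfo, .symtab,
 *				.gnu.linkonce.this_module
 */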
1922 
1923 #define COPY_CHUNK_SIZE (16*PAGE_SIZE)
1924 
1925 static int copy_chunked_from_user(void *dst, const void __user *usrc, unsigned long len)
1926 {
1927         do {
1928                 unsigned long n = min(len, COPY_CHUNK_SIZE);
1929 
1930                 if (copy_from_user(dst, usrc, n) != 0)
1931                         return -EFAULT;
1932                 cond_resched();
1933                 dst += n;
1934                 usrc += n;
1935                 len -= n;
1936         } while (len);
1937         return 0;
1938 }
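
/*
 * Editorial note: with 4 KiB pages COPY_CHUNK_SIZE is 64 KiB, so a
 * multi-megabyte module image is copied in many small chunks with a
 * cond_resched() in between, bounding the scheduling latency that one
 * large copy_from_user() would otherwise introduce.
 */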
1939 
1940 static int check_modinfo_livepatch(struct module *mod, struct load_info *info)
1941 {
1942         if (!get_modinfo(info, "livepatch"))
1943                 /* Nothing more to do */
1944                 return 0;
1945 
1946         if (set_livepatch_module(mod))
1947                 return 0;
1948 
1949         pr_err("%s: module is marked as livepatch module, but livepatch support is disabled",
1950                mod->name);
1951         return -ENOEXEC;
1952 }
1953 
1954 static void check_modinfo_retpoline(struct module *mod, struct load_info *info)
1955 {
1956         if (retpoline_module_ok(get_modinfo(info, "retpoline")))
1957                 return;
1958 
1959         pr_warn("%s: loading module not compiled with retpoline compiler.\n",
1960                 mod->name);
1961 }
1962 
1963 /* Sets info->hdr and info->len. */
1964 static int copy_module_from_user(const void __user *umod, unsigned long len,
1965                                   struct load_info *info)
1966 {
1967         int err;
1968 
1969         info->len = len;
1970         if (info->len < sizeof(*(info->hdr)))
1971                 return -ENOEXEC;
1972 
1973         err = security_kernel_load_data(LOADING_MODULE, true);
1974         if (err)
1975                 return err;
1976 
1977         /* Suck in entire file: we'll want most of it. */
1978         info->hdr = __vmalloc(info->len, GFP_KERNEL | __GFP_NOWARN);
1979         if (!info->hdr)
1980                 return -ENOMEM;
1981 
1982         if (copy_chunked_from_user(info->hdr, umod, info->len) != 0) {
1983                 err = -EFAULT;
1984                 goto out;
1985         }
1986 
1987         err = security_kernel_post_load_data((char *)info->hdr, info->len,
1988                                              LOADING_MODULE, "init_module");
1989 out:
1990         if (err)
1991                 vfree(info->hdr);
1992 
1993         return err;
1994 }
1995 
1996 static void free_copy(struct load_info *info, int flags)
1997 {
1998         if (flags & MODULE_INIT_COMPRESSED_FILE)
1999                 module_decompress_cleanup(info);
2000         else
2001                 vfree(info->hdr);
2002 }
2003 
2004 static int rewrite_section_headers(struct load_info *info, int flags)
2005 {
2006         unsigned int i;
2007 
2008         /* This should always be true, but let's be sure. */
2009         info->sechdrs[0].sh_addr = 0;
2010 
2011         for (i = 1; i < info->hdr->e_shnum; i++) {
2012                 Elf_Shdr *shdr = &info->sechdrs[i];
2013 
2014                 /*
2015                  * Mark all sections sh_addr with their address in the
2016                  * temporary image.
2017                  */
2018                 shdr->sh_addr = (size_t)info->hdr + shdr->sh_offset;
2019 
2020         }
2021 
2022         /* Track but don't keep modinfo and version sections. */
2023         info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC;
2024         info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC;
2025 
2026         return 0;
2027 }
2028 
2029 /*
2030  * These calls taint the kernel depending on certain module circumstances. */
2031 static void module_augment_kernel_taints(struct module *mod, struct load_info *info)
2032 {
2033         int prev_taint = test_taint(TAINT_PROPRIETARY_MODULE);
2034 
2035         if (!get_modinfo(info, "intree")) {
2036                 if (!test_taint(TAINT_OOT_MODULE))
2037                         pr_warn("%s: loading out-of-tree module taints kernel.\n",
2038                                 mod->name);
2039                 add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK);
2040         }
2041 
2042         check_modinfo_retpoline(mod, info);
2043 
2044         if (get_modinfo(info, "staging")) {
2045                 add_taint_module(mod, TAINT_CRAP, LOCKDEP_STILL_OK);
2046                 pr_warn("%s: module is from the staging directory, the quality "
2047                         "is unknown, you have been warned.\n", mod->name);
2048         }
2049 
2050         if (is_livepatch_module(mod)) {
2051                 add_taint_module(mod, TAINT_LIVEPATCH, LOCKDEP_STILL_OK);
2052                 pr_notice_once("%s: tainting kernel with TAINT_LIVEPATCH\n",
2053                                 mod->name);
2054         }
2055 
2056         module_license_taint_check(mod, get_modinfo(info, "license"));
2057 
2058         if (get_modinfo(info, "test")) {
2059                 if (!test_taint(TAINT_TEST))
2060                         pr_warn("%s: loading test module taints kernel.\n",
2061                                 mod->name);
2062                 add_taint_module(mod, TAINT_TEST, LOCKDEP_STILL_OK);
2063         }
2064 #ifdef CONFIG_MODULE_SIG
2065         mod->sig_ok = info->sig_ok;
2066         if (!mod->sig_ok) {
2067                 pr_notice_once("%s: module verification failed: signature "
2068                                "and/or required key missing - tainting "
2069                                "kernel\n", mod->name);
2070                 add_taint_module(mod, TAINT_UNSIGNED_MODULE, LOCKDEP_STILL_OK);
2071         }
2072 #endif
2073 
2074         /*
2075          * ndiswrapper is under GPL by itself, but loads proprietary modules.
2076          * Don't use add_taint_module(), as it would prevent ndiswrapper from
2077          * using GPL-only symbols it needs.
2078          */
2079         if (strcmp(mod->name, "ndiswrapper") == 0)
2080                 add_taint(TAINT_PROPRIETARY_MODULE, LOCKDEP_NOW_UNRELIABLE);
2081 
2082         /* driverloader was caught wrongly pretending to be under GPL */
2083         if (strcmp(mod->name, "driverloader") == 0)
2084                 add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
2085                                  LOCKDEP_NOW_UNRELIABLE);
2086 
2087         /* lve claims to be GPL but upstream won't provide source */
2088         if (strcmp(mod->name, "lve") == 0)
2089                 add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
2090                                  LOCKDEP_NOW_UNRELIABLE);
2091 
2092         if (!prev_taint && test_taint(TAINT_PROPRIETARY_MODULE))
2093                 pr_warn("%s: module license taints kernel.\n", mod->name);
2094 
2095 }
2096 
2097 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
2098 {
2099         const char *modmagic = get_modinfo(info, "vermagic");
2100         int err;
2101 
2102         if (flags & MODULE_INIT_IGNORE_VERMAGIC)
2103                 modmagic = NULL;
2104 
2105         /* This is allowed: modprobe --force will invalidate it. */
2106         if (!modmagic) {
2107                 err = try_to_force_load(mod, "bad vermagic");
2108                 if (err)
2109                         return err;
2110         } else if (!same_magic(modmagic, vermagic, info->index.vers)) {
2111                 pr_err("%s: version magic '%s' should be '%s'\n",
2112                        info->name, modmagic, vermagic);
2113                 return -ENOEXEC;
2114         }
2115 
2116         err = check_modinfo_livepatch(mod, info);
2117         if (err)
2118                 return err;
2119 
2120         return 0;
2121 }
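
/*
 * Editorial example: vermagic is a free-form string baked into both the
 * kernel and each module; a plausible value (the exact contents depend
 * on the configuration) is:
 *
 *	"6.11.5 SMP preempt mod_unload "
 *
 * With CONFIG_MODVERSIONS, same_magic() skips the leading release part
 * of the comparison when the module carries CRCs in a __versions
 * section (info->index.vers != 0), relying on the CRCs instead.
 */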
2122 
2123 static int find_module_sections(struct module *mod, struct load_info *info)
2124 {
2125         mod->kp = section_objs(info, "__param",
2126                                sizeof(*mod->kp), &mod->num_kp);
2127         mod->syms = section_objs(info, "__ksymtab",
2128                                  sizeof(*mod->syms), &mod->num_syms);
2129         mod->crcs = section_addr(info, "__kcrctab");
2130         mod->gpl_syms = section_objs(info, "__ksymtab_gpl",
2131                                      sizeof(*mod->gpl_syms),
2132                                      &mod->num_gpl_syms);
2133         mod->gpl_crcs = section_addr(info, "__kcrctab_gpl");
2134 
2135 #ifdef CONFIG_CONSTRUCTORS
2136         mod->ctors = section_objs(info, ".ctors",
2137                                   sizeof(*mod->ctors), &mod->num_ctors);
2138         if (!mod->ctors)
2139                 mod->ctors = section_objs(info, ".init_array",
2140                                 sizeof(*mod->ctors), &mod->num_ctors);
2141         else if (find_sec(info, ".init_array")) {
2142                 /*
2143                  * This shouldn't happen when the same compiler and binutils
2144                  * build all parts of the module.
2145                  */
2146                 pr_warn("%s: has both .ctors and .init_array.\n",
2147                        mod->name);
2148                 return -EINVAL;
2149         }
2150 #endif
2151 
2152         mod->noinstr_text_start = section_objs(info, ".noinstr.text", 1,
2153                                                 &mod->noinstr_text_size);
2154 
2155 #ifdef CONFIG_TRACEPOINTS
2156         mod->tracepoints_ptrs = section_objs(info, "__tracepoints_ptrs",
2157                                              sizeof(*mod->tracepoints_ptrs),
2158                                              &mod->num_tracepoints);
2159 #endif
2160 #ifdef CONFIG_TREE_SRCU
2161         mod->srcu_struct_ptrs = section_objs(info, "___srcu_struct_ptrs",
2162                                              sizeof(*mod->srcu_struct_ptrs),
2163                                              &mod->num_srcu_structs);
2164 #endif
2165 #ifdef CONFIG_BPF_EVENTS
2166         mod->bpf_raw_events = section_objs(info, "__bpf_raw_tp_map",
2167                                            sizeof(*mod->bpf_raw_events),
2168                                            &mod->num_bpf_raw_events);
2169 #endif
2170 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
2171         mod->btf_data = any_section_objs(info, ".BTF", 1, &mod->btf_data_size);
2172         mod->btf_base_data = any_section_objs(info, ".BTF.base", 1,
2173                                               &mod->btf_base_data_size);
2174 #endif
2175 #ifdef CONFIG_JUMP_LABEL
2176         mod->jump_entries = section_objs(info, "__jump_table",
2177                                         sizeof(*mod->jump_entries),
2178                                         &mod->num_jump_entries);
2179 #endif
2180 #ifdef CONFIG_EVENT_TRACING
2181         mod->trace_events = section_objs(info, "_ftrace_events",
2182                                          sizeof(*mod->trace_events),
2183                                          &mod->num_trace_events);
2184         mod->trace_evals = section_objs(info, "_ftrace_eval_map",
2185                                         sizeof(*mod->trace_evals),
2186                                         &mod->num_trace_evals);
2187 #endif
2188 #ifdef CONFIG_TRACING
2189         mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt",
2190                                          sizeof(*mod->trace_bprintk_fmt_start),
2191                                          &mod->num_trace_bprintk_fmt);
2192 #endif
2193 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
2194         /* sechdrs[0].sh_size is always zero */
2195         mod->ftrace_callsites = section_objs(info, FTRACE_CALLSITE_SECTION,
2196                                              sizeof(*mod->ftrace_callsites),
2197                                              &mod->num_ftrace_callsites);
2198 #endif
2199 #ifdef CONFIG_FUNCTION_ERROR_INJECTION
2200         mod->ei_funcs = section_objs(info, "_error_injection_whitelist",
2201                                             sizeof(*mod->ei_funcs),
2202                                             &mod->num_ei_funcs);
2203 #endif
2204 #ifdef CONFIG_KPROBES
2205         mod->kprobes_text_start = section_objs(info, ".kprobes.text", 1,
2206                                                 &mod->kprobes_text_size);
2207         mod->kprobe_blacklist = section_objs(info, "_kprobe_blacklist",
2208                                                 sizeof(unsigned long),
2209                                                 &mod->num_kprobe_blacklist);
2210 #endif
2211 #ifdef CONFIG_PRINTK_INDEX
2212         mod->printk_index_start = section_objs(info, ".printk_index",
2213                                                sizeof(*mod->printk_index_start),
2214                                                &mod->printk_index_size);
2215 #endif
2216 #ifdef CONFIG_HAVE_STATIC_CALL_INLINE
2217         mod->static_call_sites = section_objs(info, ".static_call_sites",
2218                                               sizeof(*mod->static_call_sites),
2219                                               &mod->num_static_call_sites);
2220 #endif
2221 #if IS_ENABLED(CONFIG_KUNIT)
2222         mod->kunit_suites = section_objs(info, ".kunit_test_suites",
2223                                               sizeof(*mod->kunit_suites),
2224                                               &mod->num_kunit_suites);
2225         mod->kunit_init_suites = section_objs(info, ".kunit_init_test_suites",
2226                                               sizeof(*mod->kunit_init_suites),
2227                                               &mod->num_kunit_init_suites);
2228 #endif
2229 
2230         mod->extable = section_objs(info, "__ex_table",
2231                                     sizeof(*mod->extable), &mod->num_exentries);
2232 
2233         if (section_addr(info, "__obsparm"))
2234                 pr_warn("%s: Ignoring obsolete parameters\n", mod->name);
2235 
2236 #ifdef CONFIG_DYNAMIC_DEBUG_CORE
2237         mod->dyndbg_info.descs = section_objs(info, "__dyndbg",
2238                                               sizeof(*mod->dyndbg_info.descs),
2239                                               &mod->dyndbg_info.num_descs);
2240         mod->dyndbg_info.classes = section_objs(info, "__dyndbg_classes",
2241                                                 sizeof(*mod->dyndbg_info.classes),
2242                                                 &mod->dyndbg_info.num_classes);
2243 #endif
2244 
2245         return 0;
2246 }
2247 
2248 static int move_module(struct module *mod, struct load_info *info)
2249 {
2250         int i;
2251         enum mod_mem_type t = 0;
2252         int ret = -ENOMEM;
2253 
2254         for_each_mod_mem_type(type) {
2255                 if (!mod->mem[type].size) {
2256                         mod->mem[type].base = NULL;
2257                         continue;
2258                 }
2259 
2260                 ret = module_memory_alloc(mod, type);
2261                 if (ret) {
2262                         t = type;
2263                         goto out_enomem;
2264                 }
2265         }
2266 
2267         /* Transfer each section which specifies SHF_ALLOC */
2268         pr_debug("Final section addresses for %s:\n", mod->name);
2269         for (i = 0; i < info->hdr->e_shnum; i++) {
2270                 void *dest;
2271                 Elf_Shdr *shdr = &info->sechdrs[i];
2272                 enum mod_mem_type type = shdr->sh_entsize >> SH_ENTSIZE_TYPE_SHIFT;
2273 
2274                 if (!(shdr->sh_flags & SHF_ALLOC))
2275                         continue;
2276 
2277                 dest = mod->mem[type].base + (shdr->sh_entsize & SH_ENTSIZE_OFFSET_MASK);
2278 
2279                 if (shdr->sh_type != SHT_NOBITS) {
2280                         /*
2281                          * Our ELF checker already validated this, but let's
2282                          * be pedantic and make the goal clearer. We actually
2283                          * end up copying over all modifications made to the
2284                          * userspace copy of the entire struct module.
2285                          */
2286                         if (i == info->index.mod &&
2287                            (WARN_ON_ONCE(shdr->sh_size != sizeof(struct module)))) {
2288                                 ret = -ENOEXEC;
2289                                 goto out_enomem;
2290                         }
2291                         memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
2292                 }
2293                 /*
2294                  * Update the userspace copy's ELF section address to point to
2295                  * our newly allocated memory as a pure convenience so that
2296                  * users of info can keep using the newly minted official
2297                  * memory area.
2298                  */
2299                 shdr->sh_addr = (unsigned long)dest;
2300                 pr_debug("\t0x%lx 0x%.8lx %s\n", (long)shdr->sh_addr,
2301                          (long)shdr->sh_size, info->secstrings + shdr->sh_name);
2302         }
2303 
2304         return 0;
2305 out_enomem:
2306         for (t--; t >= 0; t--)
2307                 module_memory_free(mod, t, true);
2308         return ret;
2309 }
2310 
2311 static int check_export_symbol_versions(struct module *mod)
2312 {
2313 #ifdef CONFIG_MODVERSIONS
2314         if ((mod->num_syms && !mod->crcs) ||
2315             (mod->num_gpl_syms && !mod->gpl_crcs)) {
2316                 return try_to_force_load(mod,
2317                                          "no versions for exported symbols");
2318         }
2319 #endif
2320         return 0;
2321 }
2322 
2323 static void flush_module_icache(const struct module *mod)
2324 {
2325         /*
2326          * Flush the instruction cache, since we've played with text.
2327          * Do it before processing of module parameters, so the module
2328          * can provide parameter accessor functions of its own.
2329          */
2330         for_each_mod_mem_type(type) {
2331                 const struct module_memory *mod_mem = &mod->mem[type];
2332 
2333                 if (mod_mem->size) {
2334                         flush_icache_range((unsigned long)mod_mem->base,
2335                                            (unsigned long)mod_mem->base + mod_mem->size);
2336                 }
2337         }
2338 }
2339 
2340 bool __weak module_elf_check_arch(Elf_Ehdr *hdr)
2341 {
2342         return true;
2343 }
2344 
2345 int __weak module_frob_arch_sections(Elf_Ehdr *hdr,
2346                                      Elf_Shdr *sechdrs,
2347                                      char *secstrings,
2348                                      struct module *mod)
2349 {
2350         return 0;
2351 }
2352 
2353 /* module_blacklist is a comma-separated list of module names */
2354 static char *module_blacklist;
2355 static bool blacklisted(const char *module_name)
2356 {
2357         const char *p;
2358         size_t len;
2359 
2360         if (!module_blacklist)
2361                 return false;
2362 
2363         for (p = module_blacklist; *p; p += len) {
2364                 len = strcspn(p, ",");
2365                 if (strlen(module_name) == len && !memcmp(module_name, p, len))
2366                         return true;
2367                 if (p[len] == ',')
2368                         len++;
2369         }
2370         return false;
2371 }
2372 core_param(module_blacklist, module_blacklist, charp, 0400);
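
/*
 * Editorial usage example: booting with
 *
 *	module_blacklist=nouveau,lustre
 *
 * on the kernel command line makes early_mod_check() below reject both
 * modules with -EPERM before any module memory is allocated.
 */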
2373 
2374 static struct module *layout_and_allocate(struct load_info *info, int flags)
2375 {
2376         struct module *mod;
2377         unsigned int ndx;
2378         int err;
2379 
2380         /* Allow arches to frob section contents and sizes.  */
2381         err = module_frob_arch_sections(info->hdr, info->sechdrs,
2382                                         info->secstrings, info->mod);
2383         if (err < 0)
2384                 return ERR_PTR(err);
2385 
2386         err = module_enforce_rwx_sections(info->hdr, info->sechdrs,
2387                                           info->secstrings, info->mod);
2388         if (err < 0)
2389                 return ERR_PTR(err);
2390 
2391         /* We will do a special allocation for per-cpu sections later. */
2392         info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC;
2393 
2394         /*
2395          * Mark ro_after_init section with SHF_RO_AFTER_INIT so that
2396          * layout_sections() can put it in the right place.
2397          * Note: ro_after_init sections also have SHF_{WRITE,ALLOC} set.
2398          */
2399         ndx = find_sec(info, ".data..ro_after_init");
2400         if (ndx)
2401                 info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT;
2402         /*
2403          * Mark the __jump_table section as ro_after_init as well: these data
2404          * structures are never modified, with the exception of entries that
2405          * refer to code in the __init section, which are annotated as such
2406          * at module load time.
2407          */
2408         ndx = find_sec(info, "__jump_table");
2409         if (ndx)
2410                 info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT;
2411 
2412         /*
2413          * Determine total sizes, and put offsets in sh_entsize.  For now
2414          * this is done generically; there doesn't appear to be any
2415          * special cases for the architectures.
2416          */
2417         layout_sections(info->mod, info);
2418         layout_symtab(info->mod, info);
2419 
2420         /* Allocate and move to the final place */
2421         err = move_module(info->mod, info);
2422         if (err)
2423                 return ERR_PTR(err);
2424 
2425         /* Module has been copied to its final place now: return it. */
2426         mod = (void *)info->sechdrs[info->index.mod].sh_addr;
2427         kmemleak_load_module(mod, info);
2428         return mod;
2429 }
2430 
2431 /* mod is no longer valid after this! */
2432 static void module_deallocate(struct module *mod, struct load_info *info)
2433 {
2434         percpu_modfree(mod);
2435         module_arch_freeing_init(mod);
2436 
2437         free_mod_mem(mod, true);
2438 }
2439 
2440 int __weak module_finalize(const Elf_Ehdr *hdr,
2441                            const Elf_Shdr *sechdrs,
2442                            struct module *me)
2443 {
2444         return 0;
2445 }
2446 
2447 static int post_relocation(struct module *mod, const struct load_info *info)
2448 {
2449         /* Sort exception table now relocations are done. */
2450         sort_extable(mod->extable, mod->extable + mod->num_exentries);
2451 
2452         /* Copy relocated percpu area over. */
2453         percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
2454                        info->sechdrs[info->index.pcpu].sh_size);
2455 
2456         /* Setup kallsyms-specific fields. */
2457         add_kallsyms(mod, info);
2458 
2459         /* Arch-specific module finalizing. */
2460         return module_finalize(info->hdr, info->sechdrs, mod);
2461 }
2462 
2463 /* Call module constructors. */
2464 static void do_mod_ctors(struct module *mod)
2465 {
2466 #ifdef CONFIG_CONSTRUCTORS
2467         unsigned long i;
2468 
2469         for (i = 0; i < mod->num_ctors; i++)
2470                 mod->ctors[i]();
2471 #endif
2472 }
2473 
2474 /* For freeing module_init on success, in case kallsyms is still traversing it */
2475 struct mod_initfree {
2476         struct llist_node node;
2477         void *init_text;
2478         void *init_data;
2479         void *init_rodata;
2480 };
2481 
2482 static void do_free_init(struct work_struct *w)
2483 {
2484         struct llist_node *pos, *n, *list;
2485         struct mod_initfree *initfree;
2486 
2487         list = llist_del_all(&init_free_list);
2488 
2489         synchronize_rcu();
2490 
2491         llist_for_each_safe(pos, n, list) {
2492                 initfree = container_of(pos, struct mod_initfree, node);
2493                 execmem_free(initfree->init_text);
2494                 execmem_free(initfree->init_data);
2495                 execmem_free(initfree->init_rodata);
2496                 kfree(initfree);
2497         }
2498 }
2499 
2500 void flush_module_init_free_work(void)
2501 {
2502         flush_work(&init_free_wq);
2503 }
2504 
2505 #undef MODULE_PARAM_PREFIX
2506 #define MODULE_PARAM_PREFIX "module."
2507 /* Default value for module->async_probe_requested */
2508 static bool async_probe;
2509 module_param(async_probe, bool, 0644);
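
/*
 * Editorial note: due to MODULE_PARAM_PREFIX above, this is
 * "module.async_probe" on the kernel command line. It only sets the
 * default; a per-module "async_probe" argument, handled by
 * unknown_module_param_cb() below, overrides it for a single load, e.g.:
 *
 *	modprobe foo async_probe=1
 */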
2510 
2511 /*
2512  * This is where the real work happens.
2513  *
2514  * Keep it uninlined to provide a reliable breakpoint target, e.g. for the gdb
2515  * helper command 'lx-symbols'.
2516  */
2517 static noinline int do_init_module(struct module *mod)
2518 {
2519         int ret = 0;
2520         struct mod_initfree *freeinit;
2521 #if defined(CONFIG_MODULE_STATS)
2522         unsigned int text_size = 0, total_size = 0;
2523 
2524         for_each_mod_mem_type(type) {
2525                 const struct module_memory *mod_mem = &mod->mem[type];
2526                 if (mod_mem->size) {
2527                         total_size += mod_mem->size;
2528                         if (type == MOD_TEXT || type == MOD_INIT_TEXT)
2529                                 text_size += mod_mem->size;
2530                 }
2531         }
2532 #endif
2533 
2534         freeinit = kmalloc(sizeof(*freeinit), GFP_KERNEL);
2535         if (!freeinit) {
2536                 ret = -ENOMEM;
2537                 goto fail;
2538         }
2539         freeinit->init_text = mod->mem[MOD_INIT_TEXT].base;
2540         freeinit->init_data = mod->mem[MOD_INIT_DATA].base;
2541         freeinit->init_rodata = mod->mem[MOD_INIT_RODATA].base;
2542 
2543         do_mod_ctors(mod);
2544         /* Start the module */
2545         if (mod->init != NULL)
2546                 ret = do_one_initcall(mod->init);
2547         if (ret < 0) {
2548                 goto fail_free_freeinit;
2549         }
2550         if (ret > 0) {
2551                 pr_warn("%s: '%s'->init suspiciously returned %d, it should "
2552                         "follow 0/-E convention\n"
2553                         "%s: loading module anyway...\n",
2554                         __func__, mod->name, ret, __func__);
2555                 dump_stack();
2556         }
2557 
2558         /* Now it's a first class citizen! */
2559         mod->state = MODULE_STATE_LIVE;
2560         blocking_notifier_call_chain(&module_notify_list,
2561                                      MODULE_STATE_LIVE, mod);
2562 
2563         /* Delay uevent until module has finished its init routine */
2564         kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);
2565 
2566         /*
2567          * We need to finish all async code before the module init sequence
2568          * is done. This has potential to deadlock if synchronous module
2569          * loading is requested from async (which is not allowed!).
2570          *
2571          * See commit 0fdff3ec6d87 ("async, kmod: warn on synchronous
2572          * request_module() from async workers") for more details.
2573          */
2574         if (!mod->async_probe_requested)
2575                 async_synchronize_full();
2576 
2577         ftrace_free_mem(mod, mod->mem[MOD_INIT_TEXT].base,
2578                         mod->mem[MOD_INIT_TEXT].base + mod->mem[MOD_INIT_TEXT].size);
2579         mutex_lock(&module_mutex);
2580         /* Drop initial reference. */
2581         module_put(mod);
2582         trim_init_extable(mod);
2583 #ifdef CONFIG_KALLSYMS
2584         /* Switch to core kallsyms now init is done: kallsyms may be walking! */
2585         rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms);
2586 #endif
2587         ret = module_enable_rodata_ro(mod, true);
2588         if (ret)
2589                 goto fail_mutex_unlock;
2590         mod_tree_remove_init(mod);
2591         module_arch_freeing_init(mod);
2592         for_class_mod_mem_type(type, init) {
2593                 mod->mem[type].base = NULL;
2594                 mod->mem[type].size = 0;
2595         }
2596 
2597 #ifdef CONFIG_DEBUG_INFO_BTF_MODULES
2598         /* .BTF is not SHF_ALLOC and will get removed, so sanitize pointers */
2599         mod->btf_data = NULL;
2600         mod->btf_base_data = NULL;
2601 #endif
2602         /*
2603          * We want to free module_init, but be aware that kallsyms may be
2604          * walking this with preempt disabled.  In all the failure paths, we
2605          * call synchronize_rcu(), but we don't want to slow down the success
2606          * path. execmem_free() cannot be called in an interrupt, so do the
2607          * work and call synchronize_rcu() in a work queue.
2608          *
2609          * Note that execmem_alloc() on most architectures creates W+X page
2610          * mappings which won't be cleaned up until do_free_init() runs.  Any
2611          * code such as mark_rodata_ro() which depends on those mappings to
2612          * be cleaned up needs to sync with the queued work by invoking
2613          * flush_module_init_free_work().
2614          */
2615         if (llist_add(&freeinit->node, &init_free_list))
2616                 schedule_work(&init_free_wq);
2617 
2618         mutex_unlock(&module_mutex);
2619         wake_up_all(&module_wq);
2620 
2621         mod_stat_add_long(text_size, &total_text_size);
2622         mod_stat_add_long(total_size, &total_mod_size);
2623 
2624         mod_stat_inc(&modcount);
2625 
2626         return 0;
2627 
2628 fail_mutex_unlock:
2629         mutex_unlock(&module_mutex);
2630 fail_free_freeinit:
2631         kfree(freeinit);
2632 fail:
2633         /* Try to protect us from buggy refcounters. */
2634         mod->state = MODULE_STATE_GOING;
2635         synchronize_rcu();
2636         module_put(mod);
2637         blocking_notifier_call_chain(&module_notify_list,
2638                                      MODULE_STATE_GOING, mod);
2639         klp_module_going(mod);
2640         ftrace_release_mod(mod);
2641         free_module(mod);
2642         wake_up_all(&module_wq);
2643 
2644         return ret;
2645 }
2646 
2647 static int may_init_module(void)
2648 {
2649         if (!capable(CAP_SYS_MODULE) || modules_disabled)
2650                 return -EPERM;
2651         if (!ccs_capable(CCS_USE_KERNEL_MODULE))
2652                 return -EPERM;
2653 
2654         return 0;
2655 }
2656 
2657 /* Is this module of this name done loading?  No locks held. */
2658 static bool finished_loading(const char *name)
2659 {
2660         struct module *mod;
2661         bool ret;
2662 
2663         /*
2664          * The module_mutex should not be a heavily contended lock;
2665          * if we get the occasional sleep here, we'll go an extra iteration
2666          * in the wait_event_interruptible(), which is harmless.
2667          */
2668         sched_annotate_sleep();
2669         mutex_lock(&module_mutex);
2670         mod = find_module_all(name, strlen(name), true);
2671         ret = !mod || mod->state == MODULE_STATE_LIVE
2672                 || mod->state == MODULE_STATE_GOING;
2673         mutex_unlock(&module_mutex);
2674 
2675         return ret;
2676 }
2677 
2678 /* Must be called with module_mutex held */
2679 static int module_patient_check_exists(const char *name,
2680                                        enum fail_dup_mod_reason reason)
2681 {
2682         struct module *old;
2683         int err = 0;
2684 
2685         old = find_module_all(name, strlen(name), true);
2686         if (old == NULL)
2687                 return 0;
2688 
2689         if (old->state == MODULE_STATE_COMING ||
2690             old->state == MODULE_STATE_UNFORMED) {
2691                 /* Wait in case it fails to load. */
2692                 mutex_unlock(&module_mutex);
2693                 err = wait_event_interruptible(module_wq,
2694                                        finished_loading(name));
2695                 mutex_lock(&module_mutex);
2696                 if (err)
2697                         return err;
2698 
2699                 /* The module might have gone in the meantime. */
2700                 old = find_module_all(name, strlen(name), true);
2701         }
2702 
2703         if (try_add_failed_module(name, reason))
2704                 pr_warn("Could not add fail-tracking for module: %s\n", name);
2705 
2706         /*
2707          * We only get here when the same module was already being loaded.
2708          * Do not try to load it again right now: this prevents long delays
2709          * caused by serialized module load failures, which can happen when
2710          * several devices of the same type trigger loads of the same
2711          * module.
2712          */
2713         if (old && old->state == MODULE_STATE_LIVE)
2714                 return -EEXIST;
2715         return -EBUSY;
2716 }
2717 
2718 /*
2719  * We try to place the module in the list now to make sure it's unique
2720  * before we dedicate too many resources to it; in particular, this
2721  * avoids exhausting temporary percpu memory.
2722  */
2723 static int add_unformed_module(struct module *mod)
2724 {
2725         int err;
2726 
2727         mod->state = MODULE_STATE_UNFORMED;
2728 
2729         mutex_lock(&module_mutex);
2730         err = module_patient_check_exists(mod->name, FAIL_DUP_MOD_LOAD);
2731         if (err)
2732                 goto out;
2733 
2734         mod_update_bounds(mod);
2735         list_add_rcu(&mod->list, &modules);
2736         mod_tree_insert(mod);
2737         err = 0;
2738 
2739 out:
2740         mutex_unlock(&module_mutex);
2741         return err;
2742 }
2743 
2744 static int complete_formation(struct module *mod, struct load_info *info)
2745 {
2746         int err;
2747 
2748         mutex_lock(&module_mutex);
2749 
2750         /* Find duplicate symbols (must be called under lock). */
2751         err = verify_exported_symbols(mod);
2752         if (err < 0)
2753                 goto out;
2754 
2755         /* These rely on module_mutex for list integrity. */
2756         module_bug_finalize(info->hdr, info->sechdrs, mod);
2757         module_cfi_finalize(info->hdr, info->sechdrs, mod);
2758 
2759         err = module_enable_rodata_ro(mod, false);
2760         if (err)
2761                 goto out_strict_rwx;
2762         err = module_enable_data_nx(mod);
2763         if (err)
2764                 goto out_strict_rwx;
2765         err = module_enable_text_rox(mod);
2766         if (err)
2767                 goto out_strict_rwx;
2768 
2769         /*
2770          * Mark state as coming so strong_try_module_get() ignores us,
2771          * but kallsyms etc. can see us.
2772          */
2773         mod->state = MODULE_STATE_COMING;
2774         mutex_unlock(&module_mutex);
2775 
2776         return 0;
2777 
2778 out_strict_rwx:
2779         module_bug_cleanup(mod);
2780 out:
2781         mutex_unlock(&module_mutex);
2782         return err;
2783 }
2784 
2785 static int prepare_coming_module(struct module *mod)
2786 {
2787         int err;
2788 
2789         ftrace_module_enable(mod);
2790         err = klp_module_coming(mod);
2791         if (err)
2792                 return err;
2793 
2794         err = blocking_notifier_call_chain_robust(&module_notify_list,
2795                         MODULE_STATE_COMING, MODULE_STATE_GOING, mod);
2796         err = notifier_to_errno(err);
2797         if (err)
2798                 klp_module_going(mod);
2799 
2800         return err;
2801 }
2802 
2803 static int unknown_module_param_cb(char *param, char *val, const char *modname,
2804                                    void *arg)
2805 {
2806         struct module *mod = arg;
2807         int ret;
2808 
2809         if (strcmp(param, "async_probe") == 0) {
2810                 if (kstrtobool(val, &mod->async_probe_requested))
2811                         mod->async_probe_requested = true;
2812                 return 0;
2813         }
2814 
2815         /* Check for magic 'dyndbg' arg */
2816         ret = ddebug_dyndbg_module_param_cb(param, val, modname);
2817         if (ret != 0)
2818                 pr_warn("%s: unknown parameter '%s' ignored\n", modname, param);
2819         return 0;
2820 }
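
/*
 * Editorial usage example: arguments that no module_param() declares end
 * up in this callback, so
 *
 *	insmod foo.ko async_probe=1 dyndbg=+p bar=42
 *
 * requests async probing, hands "dyndbg" to the dynamic-debug core, and
 * (assuming foo declares no "bar" parameter) merely logs
 * "foo: unknown parameter 'bar' ignored".
 */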
2821 
2822 /* Module is still within the temporary copy; this doesn't do any allocation */
2823 static int early_mod_check(struct load_info *info, int flags)
2824 {
2825         int err;
2826 
2827         /*
2828          * Now that we know we have the correct module name, check
2829          * if it's blacklisted.
2830          */
2831         if (blacklisted(info->name)) {
2832                 pr_err("Module %s is blacklisted\n", info->name);
2833                 return -EPERM;
2834         }
2835 
2836         err = rewrite_section_headers(info, flags);
2837         if (err)
2838                 return err;
2839 
2840         /* Check module struct version now, before we try to use module. */
2841         if (!check_modstruct_version(info, info->mod))
2842                 return -ENOEXEC;
2843 
2844         err = check_modinfo(info->mod, info, flags);
2845         if (err)
2846                 return err;
2847 
2848         mutex_lock(&module_mutex);
2849         err = module_patient_check_exists(info->mod->name, FAIL_DUP_MOD_BECOMING);
2850         mutex_unlock(&module_mutex);
2851 
2852         return err;
2853 }
2854 
2855 /*
2856  * Allocate and load the module: note that size of section 0 is always
2857  * zero, and we rely on this for optional sections.
2858  */
2859 static int load_module(struct load_info *info, const char __user *uargs,
2860                        int flags)
2861 {
2862         struct module *mod;
2863         bool module_allocated = false;
2864         long err = 0;
2865         char *after_dashes;
2866 
2867         /*
2868          * Do the signature check (if any) first. All that
2869          * the signature check needs is info->len; it does
2870          * not need any of the section info, which can be
2871          * set up later. This minimizes the chances
2872          * of a corrupt module causing problems before
2873          * we even get to the signature check.
2874          *
2875          * The check will also adjust info->len by stripping
2876          * off the sig length at the end of the module, making
2877          * checks against info->len more correct.
2878          */
2879         err = module_sig_check(info, flags);
2880         if (err)
2881                 goto free_copy;
2882 
2883         /*
2884          * Do basic sanity checks against the ELF header and
2885          * sections. Cache useful sections and set the
2886          * info->mod to the userspace passed struct module.
2887          */
2888         err = elf_validity_cache_copy(info, flags);
2889         if (err)
2890                 goto free_copy;
2891 
2892         err = early_mod_check(info, flags);
2893         if (err)
2894                 goto free_copy;
2895 
2896         /* Figure out module layout, and allocate all the memory. */
2897         mod = layout_and_allocate(info, flags);
2898         if (IS_ERR(mod)) {
2899                 err = PTR_ERR(mod);
2900                 goto free_copy;
2901         }
2902 
2903         module_allocated = true;
2904 
2905         audit_log_kern_module(mod->name);
2906 
2907         /* Reserve our place in the list. */
2908         err = add_unformed_module(mod);
2909         if (err)
2910                 goto free_module;
2911 
2912         /*
2913          * Apply any kernel taints this module calls for, now that it
2914          * has made it onto the modules linked list.
2915          */
2916         module_augment_kernel_taints(mod, info);
2917 
2918         /* To avoid stressing percpu allocator, do this once we're unique. */
2919         err = percpu_modalloc(mod, info);
2920         if (err)
2921                 goto unlink_mod;
2922 
2923         /* Now module is in final location, initialize linked lists, etc. */
2924         err = module_unload_init(mod);
2925         if (err)
2926                 goto unlink_mod;
2927 
2928         init_param_lock(mod);
2929 
2930         /*
2931          * Now we've got everything in the final locations, we can
2932          * find optional sections.
2933          */
2934         err = find_module_sections(mod, info);
2935         if (err)
2936                 goto free_unload;
2937 
2938         err = check_export_symbol_versions(mod);
2939         if (err)
2940                 goto free_unload;
2941 
2942         /* Set up MODINFO_ATTR fields */
2943         setup_modinfo(mod, info);
2944 
2945         /* Fix up syms, so that st_value is a pointer to location. */
2946         err = simplify_symbols(mod, info);
2947         if (err < 0)
2948                 goto free_modinfo;
2949 
2950         err = apply_relocations(mod, info);
2951         if (err < 0)
2952                 goto free_modinfo;
2953 
2954         err = post_relocation(mod, info);
2955         if (err < 0)
2956                 goto free_modinfo;
2957 
2958         flush_module_icache(mod);
2959 
2960         /* Now copy in args */
2961         mod->args = strndup_user(uargs, ~0UL >> 1);
2962         if (IS_ERR(mod->args)) {
2963                 err = PTR_ERR(mod->args);
2964                 goto free_arch_cleanup;
2965         }
2966 
2967         init_build_id(mod, info);
2968 
2969         /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
2970         ftrace_module_init(mod);
2971 
2972         /* Finally it's fully formed, ready to start executing. */
2973         err = complete_formation(mod, info);
2974         if (err)
2975                 goto ddebug_cleanup;
2976 
2977         err = prepare_coming_module(mod);
2978         if (err)
2979                 goto bug_cleanup;
2980 
2981         mod->async_probe_requested = async_probe;
2982 
2983         /* The module is ready to execute: parsing the args may already run module code. */
2984         after_dashes = parse_args(mod->name, mod->args, mod->kp, mod->num_kp,
2985                                   -32768, 32767, mod,
2986                                   unknown_module_param_cb);
2987         if (IS_ERR(after_dashes)) {
2988                 err = PTR_ERR(after_dashes);
2989                 goto coming_cleanup;
2990         } else if (after_dashes) {
2991                 pr_warn("%s: parameters '%s' after `--' ignored\n",
2992                        mod->name, after_dashes);
2993         }
2994 
2995         /* Link into sysfs. */
2996         err = mod_sysfs_setup(mod, info, mod->kp, mod->num_kp);
2997         if (err < 0)
2998                 goto coming_cleanup;
2999 
3000         if (is_livepatch_module(mod)) {
3001                 err = copy_module_elf(mod, info);
3002                 if (err < 0)
3003                         goto sysfs_cleanup;
3004         }
3005 
3006         /* Get rid of the temporary copy. */
3007         free_copy(info, flags);
3008 
3009         codetag_load_module(mod);
3010 
3011         /* Done! */
3012         trace_module_load(mod);
3013 
3014         return do_init_module(mod);
3015 
3016  sysfs_cleanup:
3017         mod_sysfs_teardown(mod);
3018  coming_cleanup:
3019         mod->state = MODULE_STATE_GOING;
3020         destroy_params(mod->kp, mod->num_kp);
3021         blocking_notifier_call_chain(&module_notify_list,
3022                                      MODULE_STATE_GOING, mod);
3023         klp_module_going(mod);
3024  bug_cleanup:
3025         mod->state = MODULE_STATE_GOING;
3026         /* module_bug_cleanup needs module_mutex protection */
3027         mutex_lock(&module_mutex);
3028         module_bug_cleanup(mod);
3029         mutex_unlock(&module_mutex);
3030 
3031  ddebug_cleanup:
3032         ftrace_release_mod(mod);
3033         synchronize_rcu();
3034         kfree(mod->args);
3035  free_arch_cleanup:
3036         module_arch_cleanup(mod);
3037  free_modinfo:
3038         free_modinfo(mod);
3039  free_unload:
3040         module_unload_free(mod);
3041  unlink_mod:
3042         mutex_lock(&module_mutex);
3043         /* Unlink carefully: kallsyms could be walking the list. */
3044         list_del_rcu(&mod->list);
3045         mod_tree_remove(mod);
3046         wake_up_all(&module_wq);
3047         /* Wait for RCU-sched synchronization before releasing mod->list. */
3048         synchronize_rcu();
3049         mutex_unlock(&module_mutex);
3050  free_module:
3051         mod_stat_bump_invalid(info, flags);
3052         /* Free lock-classes; this relies on the preceding synchronize_rcu(). */
3053         for_class_mod_mem_type(type, core_data) {
3054                 lockdep_free_key_range(mod->mem[type].base,
3055                                        mod->mem[type].size);
3056         }
3057 
3058         module_deallocate(mod, info);
3059  free_copy:
3060         /*
3061          * info->len is always set here. We distinguish between
3062          * failures that happen after the proper module was
3063          * allocated and failures that happen before that.
3064          */
3065         if (!module_allocated)
3066                 mod_stat_bump_becoming(info, flags);
3067         free_copy(info, flags);
3068         return err;
3069 }
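/*
 * The error handling above is the classic kernel unwind ladder: each
 * failure jumps to a label that tears down only what was already set
 * up, in reverse order, falling through to earlier teardown stages.
 * A minimal sketch of the same idiom (all names below are
 * hypothetical, for illustration only):
 *
 *	static int setup_everything(void)
 *	{
 *		int err;
 *
 *		err = setup_a();
 *		if (err)
 *			return err;	// nothing to undo yet
 *		err = setup_b();
 *		if (err)
 *			goto err_undo_a;
 *		err = setup_c();
 *		if (err)
 *			goto err_undo_b;
 *		return 0;
 *
 *	err_undo_b:
 *		teardown_b();
 *	err_undo_a:
 *		teardown_a();
 *		return err;
 *	}
 */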
3070 
3071 SYSCALL_DEFINE3(init_module, void __user *, umod,
3072                 unsigned long, len, const char __user *, uargs)
3073 {
3074         int err;
3075         struct load_info info = { };
3076 
3077         err = may_init_module();
3078         if (err)
3079                 return err;
3080 
3081         pr_debug("init_module: umod=%p, len=%lu, uargs=%p\n",
3082                umod, len, uargs);
3083 
3084         err = copy_module_from_user(umod, len, &info);
3085         if (err) {
3086                 mod_stat_inc(&failed_kreads);
3087                 mod_stat_add_long(len, &invalid_kread_bytes);
3088                 return err;
3089         }
3090 
3091         return load_module(&info, uargs, 0);
3092 }
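/*
 * For illustration, a minimal userspace caller of init_module(2); the
 * module path is hypothetical and error handling is compressed. There
 * is no glibc wrapper, so the raw syscall is used:
 *
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *	#include <sys/stat.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("hello.ko", O_RDONLY);	// hypothetical path
 *		struct stat st;
 *
 *		if (fd < 0 || fstat(fd, &st) != 0)
 *			return 1;
 *		void *image = mmap(NULL, st.st_size, PROT_READ,
 *				   MAP_PRIVATE, fd, 0);
 *		if (image == MAP_FAILED)
 *			return 1;
 *		// "" = no module parameters; parsed later by parse_args()
 *		return syscall(SYS_init_module, image, st.st_size, "") ? 1 : 0;
 *	}
 */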
3093 
3094 struct idempotent {
3095         const void *cookie;
3096         struct hlist_node entry;
3097         struct completion complete;
3098         int ret;
3099 };
3100 
3101 #define IDEM_HASH_BITS 8
3102 static struct hlist_head idem_hash[1 << IDEM_HASH_BITS];
3103 static DEFINE_SPINLOCK(idem_lock);
3104 
3105 static bool idempotent(struct idempotent *u, const void *cookie)
3106 {
3107         int hash = hash_ptr(cookie, IDEM_HASH_BITS);
3108         struct hlist_head *head = idem_hash + hash;
3109         struct idempotent *existing;
3110         bool first;
3111 
3112         u->ret = -EINTR;
3113         u->cookie = cookie;
3114         init_completion(&u->complete);
3115 
3116         spin_lock(&idem_lock);
3117         first = true;
3118         hlist_for_each_entry(existing, head, entry) {
3119                 if (existing->cookie != cookie)
3120                         continue;
3121                 first = false;
3122                 break;
3123         }
3124         hlist_add_head(&u->entry, idem_hash + hash);
3125         spin_unlock(&idem_lock);
3126 
3127         return !first;
3128 }
3129 
3130 /*
3131  * We were the first one with 'cookie' on the list, and we
3132  * ended up doing the work. Now walk the list, remove everybody
3133  * (including ourselves), fill in the return value, and signal
3134  * each waiter's completion.
3135  */
3136 static int idempotent_complete(struct idempotent *u, int ret)
3137 {
3138         const void *cookie = u->cookie;
3139         int hash = hash_ptr(cookie, IDEM_HASH_BITS);
3140         struct hlist_head *head = idem_hash + hash;
3141         struct hlist_node *next;
3142         struct idempotent *pos;
3143 
3144         spin_lock(&idem_lock);
3145         hlist_for_each_entry_safe(pos, next, head, entry) {
3146                 if (pos->cookie != cookie)
3147                         continue;
3148                 hlist_del_init(&pos->entry);
3149                 pos->ret = ret;
3150                 complete(&pos->complete);
3151         }
3152         spin_unlock(&idem_lock);
3153         return ret;
3154 }
3155 
3156 /*
3157  * Wait for the idempotent worker.
3158  *
3159  * If we get interrupted, we need to remove ourselves from
3160  * the idempotent list, and the completion may still come in.
3161  *
3162  * The 'idem_lock' protects against the race, and 'idem.ret' was
3163  * initialized to -EINTR and is thus always the right return
3164  * value even if the idempotent work then completes between
3165  * the wait_for_completion and the cleanup.
3166  */
3167 static int idempotent_wait_for_completion(struct idempotent *u)
3168 {
3169         if (wait_for_completion_interruptible(&u->complete)) {
3170                 spin_lock(&idem_lock);
3171                 if (!hlist_unhashed(&u->entry))
3172                         hlist_del(&u->entry);
3173                 spin_unlock(&idem_lock);
3174         }
3175         return u->ret;
3176 }
3177 
3178 static int init_module_from_file(struct file *f, const char __user *uargs, int flags)
3179 {
3180         struct load_info info = { };
3181         void *buf = NULL;
3182         int len;
3183 
3184         len = kernel_read_file(f, 0, &buf, INT_MAX, NULL, READING_MODULE);
3185         if (len < 0) {
3186                 mod_stat_inc(&failed_kreads);
3187                 return len;
3188         }
3189 
3190         if (flags & MODULE_INIT_COMPRESSED_FILE) {
3191                 int err = module_decompress(&info, buf, len);
3192                 vfree(buf); /* compressed data is no longer needed */
3193                 if (err) {
3194                         mod_stat_inc(&failed_decompress);
3195                         mod_stat_add_long(len, &invalid_decompress_bytes);
3196                         return err;
3197                 }
3198         } else {
3199                 info.hdr = buf;
3200                 info.len = len;
3201         }
3202 
3203         return load_module(&info, uargs, flags);
3204 }
3205 
3206 static int idempotent_init_module(struct file *f, const char __user *uargs, int flags)
3207 {
3208         struct idempotent idem;
3209 
3210         if (!f || !(f->f_mode & FMODE_READ))
3211                 return -EBADF;
3212 
3213         /* Did we win the race and get to do the work? */
3214         if (!idempotent(&idem, file_inode(f))) {
3215                 int ret = init_module_from_file(f, uargs, flags);
3216                 return idempotent_complete(&idem, ret);
3217         }
3218 
3219         /*
3220          * Somebody else won the race and is loading the module.
3221          */
3222         return idempotent_wait_for_completion(&idem);
3223 }
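/*
 * The practical effect of the machinery above: concurrent
 * finit_module(2) calls whose descriptors refer to the same inode
 * collapse into a single load. One caller runs init_module_from_file();
 * the rest block in idempotent_wait_for_completion() and receive the
 * same return value.
 */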
3224 
3225 SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags)
3226 {
3227         int err;
3228         struct fd f;
3229 
3230         err = may_init_module();
3231         if (err)
3232                 return err;
3233 
3234         pr_debug("finit_module: fd=%d, uargs=%p, flags=%i\n", fd, uargs, flags);
3235 
3236         if (flags & ~(MODULE_INIT_IGNORE_MODVERSIONS
3237                       |MODULE_INIT_IGNORE_VERMAGIC
3238                       |MODULE_INIT_COMPRESSED_FILE))
3239                 return -EINVAL;
3240 
3241         f = fdget(fd);
3242         err = idempotent_init_module(f.file, uargs, flags);
3243         fdput(f);
3244         return err;
3245 }
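/*
 * For illustration, a minimal userspace caller of finit_module(2), the
 * interface used by modern loaders such as kmod; the path is
 * hypothetical and error handling is compressed:
 *
 *	#include <fcntl.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("hello.ko", O_RDONLY | O_CLOEXEC);
 *
 *		if (fd < 0)
 *			return 1;
 *		// "" = no module parameters, 0 = no MODULE_INIT_* flags
 *		long ret = syscall(SYS_finit_module, fd, "", 0);
 *		close(fd);
 *		return ret ? 1 : 0;
 *	}
 */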
3246 
3247 /* Keep in sync with MODULE_FLAGS_BUF_SIZE !!! */
3248 char *module_flags(struct module *mod, char *buf, bool show_state)
3249 {
3250         int bx = 0;
3251 
3252         BUG_ON(mod->state == MODULE_STATE_UNFORMED);
3253         if (!mod->taints && !show_state)
3254                 goto out;
3255         if (mod->taints ||
3256             mod->state == MODULE_STATE_GOING ||
3257             mod->state == MODULE_STATE_COMING) {
3258                 buf[bx++] = '(';
3259                 bx += module_flags_taint(mod->taints, buf + bx);
3260                 /* Show a - for module-is-being-unloaded */
3261                 if (mod->state == MODULE_STATE_GOING && show_state)
3262                         buf[bx++] = '-';
3263                 /* Show a + for module-is-being-loaded */
3264                 if (mod->state == MODULE_STATE_COMING && show_state)
3265                         buf[bx++] = '+';
3266                 buf[bx++] = ')';
3267         }
3268 out:
3269         buf[bx] = '\0';
3270 
3271         return buf;
3272 }
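/*
 * Sample rendering (module state and taints hypothetical): an
 * out-of-tree ('O'), unsigned ('E') module that is still in
 * MODULE_STATE_COMING is shown as "(OE+)", e.g. in /proc/modules or in
 * an oops "Modules linked in:" line.
 */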
3273 
3274 /* Given an address, look for it in the module exception tables. */
3275 const struct exception_table_entry *search_module_extables(unsigned long addr)
3276 {
3277         const struct exception_table_entry *e = NULL;
3278         struct module *mod;
3279 
3280         preempt_disable();
3281         mod = __module_address(addr);
3282         if (!mod)
3283                 goto out;
3284 
3285         if (!mod->num_exentries)
3286                 goto out;
3287 
3288         e = search_extable(mod->extable,
3289                            mod->num_exentries,
3290                            addr);
3291 out:
3292         preempt_enable();
3293 
3294         /*
3295          * If we found an entry, we are currently running inside it,
3296          * so the module cannot be unloaded and no refcount is needed.
3297          */
3298         return e;
3299 }
3300 
3301 /**
3302  * is_module_address() - is this address inside a module?
3303  * @addr: the address to check.
3304  *
3305  * See is_module_text_address() if you simply want to see if the address
3306  * is code (not data).
3307  */
3308 bool is_module_address(unsigned long addr)
3309 {
3310         bool ret;
3311 
3312         preempt_disable();
3313         ret = __module_address(addr) != NULL;
3314         preempt_enable();
3315 
3316         return ret;
3317 }
3318 
3319 /**
3320  * __module_address() - get the module which contains an address.
3321  * @addr: the address.
3322  *
3323  * Must be called with preemption disabled or module_mutex held so
3324  * that the module doesn't get freed while it is being examined.
3325  */
3326 struct module *__module_address(unsigned long addr)
3327 {
3328         struct module *mod;
3329 
3330         if (addr >= mod_tree.addr_min && addr <= mod_tree.addr_max)
3331                 goto lookup;
3332 
3333 #ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
3334         if (addr >= mod_tree.data_addr_min && addr <= mod_tree.data_addr_max)
3335                 goto lookup;
3336 #endif
3337 
3338         return NULL;
3339 
3340 lookup:
3341         module_assert_mutex_or_preempt();
3342 
3343         mod = mod_find(addr, &mod_tree);
3344         if (mod) {
3345                 BUG_ON(!within_module(addr, mod));
3346                 if (mod->state == MODULE_STATE_UNFORMED)
3347                         mod = NULL;
3348         }
3349         return mod;
3350 }
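/*
 * Note that the addr_min/addr_max (and data_addr_min/data_addr_max)
 * comparisons above are a cheap range filter: they reject most
 * non-module addresses before the more expensive mod_tree lookup.
 */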
3351 
3352 /**
3353  * is_module_text_address() - is this address inside module code?
3354  * @addr: the address to check.
3355  *
3356  * See is_module_address() if you simply want to see if the address is
3357  * anywhere in a module.  See kernel_text_address() for testing if an
3358  * address corresponds to kernel or module code.
3359  */
3360 bool is_module_text_address(unsigned long addr)
3361 {
3362         bool ret;
3363 
3364         preempt_disable();
3365         ret = __module_text_address(addr) != NULL;
3366         preempt_enable();
3367 
3368         return ret;
3369 }
3370 
3371 /**
3372  * __module_text_address() - get the module whose code contains an address.
3373  * @addr: the address.
3374  *
3375  * Must be called with preemption disabled or module_mutex held so
3376  * that the module doesn't get freed while it is being examined.
3377  */
3378 struct module *__module_text_address(unsigned long addr)
3379 {
3380         struct module *mod = __module_address(addr);
3381         if (mod) {
3382                 /* Make sure it's within one of the text sections. */
3383                 if (!within_module_mem_type(addr, mod, MOD_TEXT) &&
3384                     !within_module_mem_type(addr, mod, MOD_INIT_TEXT))
3385                         mod = NULL;
3386         }
3387         return mod;
3388 }
3389 
3390 /* Don't grab the lock; we're oopsing. */
3391 void print_modules(void)
3392 {
3393         struct module *mod;
3394         char buf[MODULE_FLAGS_BUF_SIZE];
3395 
3396         printk(KERN_DEFAULT "Modules linked in:");
3397         /* Most callers should already have preempt disabled, but make sure */
3398         preempt_disable();
3399         list_for_each_entry_rcu(mod, &modules, list) {
3400                 if (mod->state == MODULE_STATE_UNFORMED)
3401                         continue;
3402                 pr_cont(" %s%s", mod->name, module_flags(mod, buf, true));
3403         }
3404 
3405         print_unloaded_tainted_modules();
3406         preempt_enable();
3407         if (last_unloaded_module.name[0])
3408                 pr_cont(" [last unloaded: %s%s]", last_unloaded_module.name,
3409                         last_unloaded_module.taints);
3410         pr_cont("\n");
3411 }
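/*
 * A hypothetical example of the resulting oops line, with one live
 * out-of-tree module and one previously unloaded module:
 *
 *	Modules linked in: overlay hello(O) [last unloaded: foo]
 */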
3412 
3413 #ifdef CONFIG_MODULE_DEBUGFS
3414 struct dentry *mod_debugfs_root;
3415 
3416 static int module_debugfs_init(void)
3417 {
3418         mod_debugfs_root = debugfs_create_dir("modules", NULL);
3419         return 0;
3420 }
3421 module_init(module_debugfs_init);
3422 #endif
3423 