/*
   Copyright (C) 2002 Richard Henderson
   Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/
#include <linux/export.h>
#include <linux/moduleloader.h>
#include <linux/ftrace_event.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/elf.h>
#include <linux/proc_fs.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/rcupdate.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/vermagic.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/string.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <linux/license.h>
#include <asm/sections.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/async.h>
#include <linux/percpu.h>
#include <linux/kmemleak.h>
#include <linux/jump_label.h>
#include <linux/pfn.h>
#include <linux/bsearch.h>
#include <uapi/linux/module.h>
#include "module-internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/module.h>

#ifndef ARCH_SHF_SMALL
#define ARCH_SHF_SMALL 0
#endif

/*
 * Modules' sections will be aligned on page boundaries
 * to ensure complete separation of code and data, but
 * only when CONFIG_DEBUG_SET_MODULE_RONX=y
 */
#ifdef CONFIG_DEBUG_SET_MODULE_RONX
# define debug_align(X) ALIGN(X, PAGE_SIZE)
#else
# define debug_align(X) (X)
#endif
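
/*
 * Example, assuming 4 KiB pages: with CONFIG_DEBUG_SET_MODULE_RONX=y,
 * debug_align(0x1234) rounds up to 0x2000 so that code and data never share
 * a page; without it, debug_align(0x1234) is simply 0x1234.
 */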

/*
 * Given BASE and SIZE this macro calculates the number of pages the
 * memory region occupies
 */
#define MOD_NUMBER_OF_PAGES(BASE, SIZE) (((SIZE) > 0) ?		\
		(PFN_DOWN((unsigned long)(BASE) + (SIZE) - 1) -	\
			 PFN_DOWN((unsigned long)BASE) + 1)	\
		: (0UL))
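
/*
 * Worked example, again assuming 4 KiB pages: BASE = 0x10000 and
 * SIZE = 0x2001 touch PFNs 0x10 through 0x12, so
 *	PFN_DOWN(0x10000 + 0x2001 - 1) - PFN_DOWN(0x10000) + 1 = 3
 * pages, while a SIZE of 0 yields 0UL.
 */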

/* If this is set, the section belongs in the init part of the module */
#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))

/*
 * Mutex protects:
 * 1) List of modules (also safely readable with preempt_disable),
 * 2) module_use links,
 * 3) module_addr_min/module_addr_max.
 * (delete and add use RCU list operations.)
 */
DEFINE_MUTEX(module_mutex);
EXPORT_SYMBOL_GPL(module_mutex);
static LIST_HEAD(modules);
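/*
 * Minimal reader sketch (illustrative only; do_something() is a placeholder,
 * not a real helper): either take module_mutex, or rely on the RCU list
 * protection noted above:
 *
 *	preempt_disable();
 *	list_for_each_entry_rcu(mod, &modules, list)
 *		do_something(mod);
 *	preempt_enable();
 */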

#ifdef CONFIG_MODULES_TREE_LOOKUP

/*
 * Use a latched RB-tree for __module_address(); this allows us to use
 * RCU-sched lookups of the address from any context.
 *
 * Because modules have two address ranges: init and core, we need two
 * latch_tree_nodes entries. Therefore we need the back-pointer from
 * mod_tree_node.
 *
 * Because init ranges are short lived we mark them unlikely and have placed
 * them outside the critical cacheline in struct module.
 *
 * This is conditional on PERF_EVENTS || TRACING because those can really hit
 * __module_address() hard by doing a lot of stack unwinding; potentially from
 * NMI context.
 */

static __always_inline unsigned long __mod_tree_val(struct latch_tree_node *n)
{
	struct mod_tree_node *mtn = container_of(n, struct mod_tree_node, node);
	struct module *mod = mtn->mod;

	if (unlikely(mtn == &mod->mtn_init))
		return (unsigned long)mod->module_init;

	return (unsigned long)mod->module_core;
}

static __always_inline unsigned long __mod_tree_size(struct latch_tree_node *n)
{
	struct mod_tree_node *mtn = container_of(n, struct mod_tree_node, node);
	struct module *mod = mtn->mod;

	if (unlikely(mtn == &mod->mtn_init))
		return (unsigned long)mod->init_size;

	return (unsigned long)mod->core_size;
}

static __always_inline bool
mod_tree_less(struct latch_tree_node *a, struct latch_tree_node *b)
{
	return __mod_tree_val(a) < __mod_tree_val(b);
}

static __always_inline int
mod_tree_comp(void *key, struct latch_tree_node *n)
{
	unsigned long val = (unsigned long)key;
	unsigned long start, end;

	start = __mod_tree_val(n);
	if (val < start)
		return -1;

	end = start + __mod_tree_size(n);
	if (val >= end)
		return 1;

	return 0;
}

static const struct latch_tree_ops mod_tree_ops = {
	.less = mod_tree_less,
	.comp = mod_tree_comp,
};
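
/*
 * Example of how a lookup is classified against a node covering
 * [start, start + size): mod_tree_comp() returns -1 below start, 0 inside
 * the range and 1 at or above start + size, which is exactly what
 * latch_tree_find() needs to walk left, stop or walk right.
 */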

static struct mod_tree_root {
	struct latch_tree_root root;
	unsigned long addr_min;
	unsigned long addr_max;
} mod_tree __cacheline_aligned = {
	.addr_min = -1UL,
};

#define module_addr_min mod_tree.addr_min
#define module_addr_max mod_tree.addr_max

static noinline void __mod_tree_insert(struct mod_tree_node *node)
{
	latch_tree_insert(&node->node, &mod_tree.root, &mod_tree_ops);
}

static void __mod_tree_remove(struct mod_tree_node *node)
{
	latch_tree_erase(&node->node, &mod_tree.root, &mod_tree_ops);
}

/*
 * These modifications (insert, remove_init and remove) are serialized by
 * module_mutex.
 */
static void mod_tree_insert(struct module *mod)
{
	mod->mtn_core.mod = mod;
	mod->mtn_init.mod = mod;

	__mod_tree_insert(&mod->mtn_core);
	if (mod->init_size)
		__mod_tree_insert(&mod->mtn_init);
}

static void mod_tree_remove_init(struct module *mod)
{
	if (mod->init_size)
		__mod_tree_remove(&mod->mtn_init);
}

static void mod_tree_remove(struct module *mod)
{
	__mod_tree_remove(&mod->mtn_core);
	mod_tree_remove_init(mod);
}

static struct module *mod_find(unsigned long addr)
{
	struct latch_tree_node *ltn;

	ltn = latch_tree_find((void *)addr, &mod_tree.root, &mod_tree_ops);
	if (!ltn)
		return NULL;

	return container_of(ltn, struct mod_tree_node, node)->mod;
}

#else /* MODULES_TREE_LOOKUP */

static unsigned long module_addr_min = -1UL, module_addr_max = 0;

static void mod_tree_insert(struct module *mod) { }
static void mod_tree_remove_init(struct module *mod) { }
static void mod_tree_remove(struct module *mod) { }

static struct module *mod_find(unsigned long addr)
{
	struct module *mod;

	list_for_each_entry_rcu(mod, &modules, list) {
		if (within_module(addr, mod))
			return mod;
	}

	return NULL;
}

#endif /* MODULES_TREE_LOOKUP */

/*
 * Bounds of module text, for speeding up __module_address.
 * Protected by module_mutex.
 */
static void __mod_update_bounds(void *base, unsigned int size)
{
	unsigned long min = (unsigned long)base;
	unsigned long max = min + size;

	if (min < module_addr_min)
		module_addr_min = min;
	if (max > module_addr_max)
		module_addr_max = max;
}

static void mod_update_bounds(struct module *mod)
{
	__mod_update_bounds(mod->module_core, mod->core_size);
	if (mod->init_size)
		__mod_update_bounds(mod->module_init, mod->init_size);
}
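
/*
 * Example with made-up addresses: a core section at 0xa0000000 (size 0x4000)
 * and another at 0xa0010000 (size 0x2000) leave module_addr_min at
 * 0xa0000000 and module_addr_max at 0xa0012000, letting __module_address()
 * cheaply reject any address outside that window.
 */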

#ifdef CONFIG_KGDB_KDB
struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
#endif /* CONFIG_KGDB_KDB */

static void module_assert_mutex(void)
{
	lockdep_assert_held(&module_mutex);
}

static void module_assert_mutex_or_preempt(void)
{
#ifdef CONFIG_LOCKDEP
	if (unlikely(!debug_locks))
		return;

	WARN_ON(!rcu_read_lock_sched_held() &&
		!lockdep_is_held(&module_mutex));
#endif
}

static bool sig_enforce = IS_ENABLED(CONFIG_MODULE_SIG_FORCE);
#ifndef CONFIG_MODULE_SIG_FORCE
module_param(sig_enforce, bool_enable_only, 0644);
#endif /* !CONFIG_MODULE_SIG_FORCE */

/* Block module loading/unloading? */
int modules_disabled = 0;
core_param(nomodule, modules_disabled, bint, 0);

/* Waiting for a module to finish initializing? */
static DECLARE_WAIT_QUEUE_HEAD(module_wq);

static BLOCKING_NOTIFIER_HEAD(module_notify_list);

int register_module_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&module_notify_list, nb);
}
EXPORT_SYMBOL(register_module_notifier);

int unregister_module_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&module_notify_list, nb);
}
EXPORT_SYMBOL(unregister_module_notifier);

struct load_info {
	Elf_Ehdr *hdr;
	unsigned long len;
	Elf_Shdr *sechdrs;
	char *secstrings, *strtab;
	unsigned long symoffs, stroffs;
	struct _ddebug *debug;
	unsigned int num_debug;
	bool sig_ok;
	struct {
		unsigned int sym, str, mod, vers, info, pcpu;
	} index;
};

/*
 * We require a truly strong try_module_get(): 0 means success.
 * Otherwise an error is returned due to ongoing or failed
 * initialization etc.
 */
static inline int strong_try_module_get(struct module *mod)
{
	BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED);
	if (mod && mod->state == MODULE_STATE_COMING)
		return -EBUSY;
	if (try_module_get(mod))
		return 0;
	else
		return -ENOENT;
}

static inline void add_taint_module(struct module *mod, unsigned flag,
				    enum lockdep_ok lockdep_ok)
{
	add_taint(flag, lockdep_ok);
	mod->taints |= (1U << flag);
}

/*
 * A thread that wants to hold a reference to a module only while it
 * is running can call this to safely exit.  nfsd and lockd use this.
 */
void __module_put_and_exit(struct module *mod, long code)
{
	module_put(mod);
	do_exit(code);
}
EXPORT_SYMBOL(__module_put_and_exit);

/* Find a module section: 0 means not found. */
static unsigned int find_sec(const struct load_info *info, const char *name)
{
	unsigned int i;

	for (i = 1; i < info->hdr->e_shnum; i++) {
		Elf_Shdr *shdr = &info->sechdrs[i];
		/* Alloc bit cleared means "ignore it." */
		if ((shdr->sh_flags & SHF_ALLOC)
		    && strcmp(info->secstrings + shdr->sh_name, name) == 0)
			return i;
	}
	return 0;
}

/* Find a module section, or NULL. */
static void *section_addr(const struct load_info *info, const char *name)
{
	/* Section 0 has sh_addr 0. */
	return (void *)info->sechdrs[find_sec(info, name)].sh_addr;
}

/* Find a module section, or NULL.  Fill in number of "objects" in section. */
static void *section_objs(const struct load_info *info,
			  const char *name,
			  size_t object_size,
			  unsigned int *num)
{
	unsigned int sec = find_sec(info, name);

	/* Section 0 has sh_addr 0 and sh_size 0. */
	*num = info->sechdrs[sec].sh_size / object_size;
	return (void *)info->sechdrs[sec].sh_addr;
}

/* Provided by the linker */
extern const struct kernel_symbol __start___ksymtab[];
extern const struct kernel_symbol __stop___ksymtab[];
extern const struct kernel_symbol __start___ksymtab_gpl[];
extern const struct kernel_symbol __stop___ksymtab_gpl[];
extern const struct kernel_symbol __start___ksymtab_gpl_future[];
extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
extern const unsigned long __start___kcrctab[];
extern const unsigned long __start___kcrctab_gpl[];
extern const unsigned long __start___kcrctab_gpl_future[];
#ifdef CONFIG_UNUSED_SYMBOLS
extern const struct kernel_symbol __start___ksymtab_unused[];
extern const struct kernel_symbol __stop___ksymtab_unused[];
extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
extern const unsigned long __start___kcrctab_unused[];
extern const unsigned long __start___kcrctab_unused_gpl[];
#endif

#ifndef CONFIG_MODVERSIONS
#define symversion(base, idx) NULL
#else
#define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
#endif

static bool each_symbol_in_section(const struct symsearch *arr,
				   unsigned int arrsize,
				   struct module *owner,
				   bool (*fn)(const struct symsearch *syms,
					      struct module *owner,
					      void *data),
				   void *data)
{
	unsigned int j;

	for (j = 0; j < arrsize; j++) {
		if (fn(&arr[j], owner, data))
			return true;
	}

	return false;
}

/* Returns true as soon as fn returns true, otherwise false. */
bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
				    struct module *owner,
				    void *data),
			 void *data)
{
	struct module *mod;
	static const struct symsearch arr[] = {
		{ __start___ksymtab, __stop___ksymtab, __start___kcrctab,
		  NOT_GPL_ONLY, false },
		{ __start___ksymtab_gpl, __stop___ksymtab_gpl,
		  __start___kcrctab_gpl,
		  GPL_ONLY, false },
		{ __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future,
		  __start___kcrctab_gpl_future,
		  WILL_BE_GPL_ONLY, false },
#ifdef CONFIG_UNUSED_SYMBOLS
		{ __start___ksymtab_unused, __stop___ksymtab_unused,
		  __start___kcrctab_unused,
		  NOT_GPL_ONLY, true },
		{ __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl,
		  __start___kcrctab_unused_gpl,
		  GPL_ONLY, true },
#endif
	};

	module_assert_mutex_or_preempt();

	if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
		return true;

	list_for_each_entry_rcu(mod, &modules, list) {
		struct symsearch arr[] = {
			{ mod->syms, mod->syms + mod->num_syms, mod->crcs,
			  NOT_GPL_ONLY, false },
			{ mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
			  mod->gpl_crcs,
			  GPL_ONLY, false },
			{ mod->gpl_future_syms,
			  mod->gpl_future_syms + mod->num_gpl_future_syms,
			  mod->gpl_future_crcs,
			  WILL_BE_GPL_ONLY, false },
#ifdef CONFIG_UNUSED_SYMBOLS
			{ mod->unused_syms,
			  mod->unused_syms + mod->num_unused_syms,
			  mod->unused_crcs,
			  NOT_GPL_ONLY, true },
			{ mod->unused_gpl_syms,
			  mod->unused_gpl_syms + mod->num_unused_gpl_syms,
			  mod->unused_gpl_crcs,
			  GPL_ONLY, true },
#endif
		};

		if (mod->state == MODULE_STATE_UNFORMED)
			continue;

		if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(each_symbol_section);

struct find_symbol_arg {
	/* Input */
	const char *name;
	bool gplok;
	bool warn;

	/* Output */
	struct module *owner;
	const unsigned long *crc;
	const struct kernel_symbol *sym;
};

static bool check_symbol(const struct symsearch *syms,
				 struct module *owner,
				 unsigned int symnum, void *data)
{
	struct find_symbol_arg *fsa = data;

	if (!fsa->gplok) {
		if (syms->licence == GPL_ONLY)
			return false;
		if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) {
			pr_warn("Symbol %s is being used by a non-GPL module, "
				"which will not be allowed in the future\n",
				fsa->name);
		}
	}

#ifdef CONFIG_UNUSED_SYMBOLS
	if (syms->unused && fsa->warn) {
		pr_warn("Symbol %s is marked as UNUSED, however this module is "
			"using it.\n", fsa->name);
		pr_warn("This symbol will go away in the future.\n");
		pr_warn("Please evaluate if this is the right api to use and "
			"if it really is, submit a report to the linux kernel "
			"mailing list together with submitting your code for "
			"inclusion.\n");
	}
#endif

	fsa->owner = owner;
	fsa->crc = symversion(syms->crcs, symnum);
	fsa->sym = &syms->start[symnum];
	return true;
}

static int cmp_name(const void *va, const void *vb)
{
	const char *a;
	const struct kernel_symbol *b;
	a = va; b = vb;
	return strcmp(a, b->name);
}

static bool find_symbol_in_section(const struct symsearch *syms,
				   struct module *owner,
				   void *data)
{
	struct find_symbol_arg *fsa = data;
	struct kernel_symbol *sym;

	sym = bsearch(fsa->name, syms->start, syms->stop - syms->start,
			sizeof(struct kernel_symbol), cmp_name);

	if (sym != NULL && check_symbol(syms, owner, sym - syms->start, data))
		return true;

	return false;
}

/* Find a symbol and return it, along with (optional) crc and
 * (optional) module which owns it.  Needs preempt disabled or module_mutex. */
const struct kernel_symbol *find_symbol(const char *name,
					struct module **owner,
					const unsigned long **crc,
					bool gplok,
					bool warn)
{
	struct find_symbol_arg fsa;

	fsa.name = name;
	fsa.gplok = gplok;
	fsa.warn = warn;

	if (each_symbol_section(find_symbol_in_section, &fsa)) {
		if (owner)
			*owner = fsa.owner;
		if (crc)
			*crc = fsa.crc;
		return fsa.sym;
	}

	pr_debug("Failed to find symbol %s\n", name);
	return NULL;
}
EXPORT_SYMBOL_GPL(find_symbol);
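
/*
 * Illustrative call, mirroring __symbol_put() below (the symbol name is only
 * an example):
 *
 *	preempt_disable();
 *	sym = find_symbol("printk", &owner, NULL, true, false);
 *	if (sym)
 *		addr = sym->value;
 *	preempt_enable();
 */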

/* Search for module by name: must hold module_mutex. */
static struct module *find_module_all(const char *name, size_t len,
				      bool even_unformed)
{
	struct module *mod;

	module_assert_mutex();

	list_for_each_entry(mod, &modules, list) {
		if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (strlen(mod->name) == len && !memcmp(mod->name, name, len))
			return mod;
	}
	return NULL;
}

struct module *find_module(const char *name)
{
	return find_module_all(name, strlen(name), false);
}
EXPORT_SYMBOL_GPL(find_module);

#ifdef CONFIG_SMP

static inline void __percpu *mod_percpu(struct module *mod)
{
	return mod->percpu;
}

static int percpu_modalloc(struct module *mod, struct load_info *info)
{
	Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu];
	unsigned long align = pcpusec->sh_addralign;

	if (!pcpusec->sh_size)
		return 0;

	if (align > PAGE_SIZE) {
		pr_warn("%s: per-cpu alignment %li > %li\n",
			mod->name, align, PAGE_SIZE);
		align = PAGE_SIZE;
	}

	mod->percpu = __alloc_reserved_percpu(pcpusec->sh_size, align);
	if (!mod->percpu) {
		pr_warn("%s: Could not allocate %lu bytes percpu data\n",
			mod->name, (unsigned long)pcpusec->sh_size);
		return -ENOMEM;
	}
	mod->percpu_size = pcpusec->sh_size;
	return 0;
}

static void percpu_modfree(struct module *mod)
{
	free_percpu(mod->percpu);
}

static unsigned int find_pcpusec(struct load_info *info)
{
	return find_sec(info, ".data..percpu");
}

static void percpu_modcopy(struct module *mod,
			   const void *from, unsigned long size)
{
	int cpu;

	for_each_possible_cpu(cpu)
		memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
}

/**
 * is_module_percpu_address - test whether address is from module static percpu
 * @addr: address to test
 *
 * Test whether @addr belongs to module static percpu area.
 *
 * RETURNS:
 * %true if @addr is from module static percpu area
 */
bool is_module_percpu_address(unsigned long addr)
{
	struct module *mod;
	unsigned int cpu;

	preempt_disable();

	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (!mod->percpu_size)
			continue;
		for_each_possible_cpu(cpu) {
			void *start = per_cpu_ptr(mod->percpu, cpu);

			if ((void *)addr >= start &&
			    (void *)addr < start + mod->percpu_size) {
				preempt_enable();
				return true;
			}
		}
	}

	preempt_enable();
	return false;
}

#else /* ... !CONFIG_SMP */

static inline void __percpu *mod_percpu(struct module *mod)
{
	return NULL;
}
static int percpu_modalloc(struct module *mod, struct load_info *info)
{
	/* UP modules shouldn't have this section: ENOMEM isn't quite right */
	if (info->sechdrs[info->index.pcpu].sh_size != 0)
		return -ENOMEM;
	return 0;
}
static inline void percpu_modfree(struct module *mod)
{
}
static unsigned int find_pcpusec(struct load_info *info)
{
	return 0;
}
static inline void percpu_modcopy(struct module *mod,
				  const void *from, unsigned long size)
{
	/* pcpusec should be 0, and size of that section should be 0. */
	BUG_ON(size != 0);
}
bool is_module_percpu_address(unsigned long addr)
{
	return false;
}

#endif /* CONFIG_SMP */

#define MODINFO_ATTR(field)	\
static void setup_modinfo_##field(struct module *mod, const char *s)  \
{                                                                     \
	mod->field = kstrdup(s, GFP_KERNEL);                          \
}                                                                     \
static ssize_t show_modinfo_##field(struct module_attribute *mattr,   \
			struct module_kobject *mk, char *buffer)      \
{                                                                     \
	return scnprintf(buffer, PAGE_SIZE, "%s\n", mk->mod->field);  \
}                                                                     \
static int modinfo_##field##_exists(struct module *mod)               \
{                                                                     \
	return mod->field != NULL;                                    \
}                                                                     \
static void free_modinfo_##field(struct module *mod)                  \
{                                                                     \
	kfree(mod->field);                                            \
	mod->field = NULL;                                            \
}                                                                     \
static struct module_attribute modinfo_##field = {                    \
	.attr = { .name = __stringify(field), .mode = 0444 },         \
	.show = show_modinfo_##field,                                 \
	.setup = setup_modinfo_##field,                               \
	.test = modinfo_##field##_exists,                             \
	.free = free_modinfo_##field,                                 \
};

MODINFO_ATTR(version);
MODINFO_ATTR(srcversion);
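
/*
 * Each MODINFO_ATTR(field) expansion above generates setup_modinfo_<field>(),
 * show_modinfo_<field>(), modinfo_<field>_exists() and free_modinfo_<field>()
 * plus a read-only (0444) module attribute named after the field; for a
 * loaded module these typically surface as /sys/module/<name>/version and
 * /sys/module/<name>/srcversion.
 */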

static char last_unloaded_module[MODULE_NAME_LEN+1];

#ifdef CONFIG_MODULE_UNLOAD

EXPORT_TRACEPOINT_SYMBOL(module_get);

/* MODULE_REF_BASE is the base reference count held by the kernel module loader. */
#define MODULE_REF_BASE	1

/* Init the unload section of the module. */
static int module_unload_init(struct module *mod)
{
	/*
	 * Initialize reference counter to MODULE_REF_BASE.
	 * refcnt == 0 means module is going.
	 */
	atomic_set(&mod->refcnt, MODULE_REF_BASE);

	INIT_LIST_HEAD(&mod->source_list);
	INIT_LIST_HEAD(&mod->target_list);

	/* Hold reference count during initialization. */
	atomic_inc(&mod->refcnt);

	return 0;
}

/* Does a already use b? */
static int already_uses(struct module *a, struct module *b)
{
	struct module_use *use;

	list_for_each_entry(use, &b->source_list, source_list) {
		if (use->source == a) {
			pr_debug("%s uses %s!\n", a->name, b->name);
			return 1;
		}
	}
	pr_debug("%s does not use %s!\n", a->name, b->name);
	return 0;
}

/*
 * Module a uses b
 *  - we add 'a' as a "source", 'b' as a "target" of module use
 *  - the module_use is added to the list of 'b' sources (so
 *    'b' can walk the list to see who sourced them), and of 'a'
 *    targets (so 'a' can see what modules it targets).
 */
static int add_module_usage(struct module *a, struct module *b)
{
	struct module_use *use;

	pr_debug("Allocating new usage for %s.\n", a->name);
	use = kmalloc(sizeof(*use), GFP_ATOMIC);
	if (!use) {
		pr_warn("%s: out of memory loading\n", a->name);
		return -ENOMEM;
	}

	use->source = a;
	use->target = b;
	list_add(&use->source_list, &b->source_list);
	list_add(&use->target_list, &a->target_list);
	return 0;
}

/* Module a uses b: caller must hold module_mutex */
int ref_module(struct module *a, struct module *b)
{
	int err;

	if (b == NULL || already_uses(a, b))
		return 0;

	/* If module isn't available, we fail. */
	err = strong_try_module_get(b);
	if (err)
		return err;

	err = add_module_usage(a, b);
	if (err) {
		module_put(b);
		return err;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ref_module);

/* Clear the unload stuff of the module. */
static void module_unload_free(struct module *mod)
{
	struct module_use *use, *tmp;

	mutex_lock(&module_mutex);
	list_for_each_entry_safe(use, tmp, &mod->target_list, target_list) {
		struct module *i = use->target;
		pr_debug("%s unusing %s\n", mod->name, i->name);
		module_put(i);
		list_del(&use->source_list);
		list_del(&use->target_list);
		kfree(use);
	}
	mutex_unlock(&module_mutex);
}

#ifdef CONFIG_MODULE_FORCE_UNLOAD
static inline int try_force_unload(unsigned int flags)
{
	int ret = (flags & O_TRUNC);
	if (ret)
		add_taint(TAINT_FORCED_RMMOD, LOCKDEP_NOW_UNRELIABLE);
	return ret;
}
#else
static inline int try_force_unload(unsigned int flags)
{
	return 0;
}
#endif /* CONFIG_MODULE_FORCE_UNLOAD */

/* Try to release refcount of module, 0 means success. */
static int try_release_module_ref(struct module *mod)
{
	int ret;

	/* Try to decrement refcnt which we set at loading */
	ret = atomic_sub_return(MODULE_REF_BASE, &mod->refcnt);
	BUG_ON(ret < 0);
	if (ret)
		/* Someone can put this right now, recover with checking */
		ret = atomic_add_unless(&mod->refcnt, MODULE_REF_BASE, 0);

	return ret;
}
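
/*
 * Worked example of the refcnt scheme: a live module with two users has
 * refcnt == MODULE_REF_BASE + 2 == 3.  try_release_module_ref() subtracts
 * MODULE_REF_BASE, sees the non-zero remainder 2, adds the base back and
 * returns non-zero, so try_stop_module() below refuses the unload with
 * -EWOULDBLOCK unless the removal is forced.
 */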

static int try_stop_module(struct module *mod, int flags, int *forced)
{
	/* If it's not unused, quit unless we're forcing. */
	if (try_release_module_ref(mod) != 0) {
		*forced = try_force_unload(flags);
		if (!(*forced))
			return -EWOULDBLOCK;
	}

	/* Mark it as dying. */
	mod->state = MODULE_STATE_GOING;

	return 0;
}

/**
 * module_refcount - return the refcount or -1 if unloading
 *
 * @mod:	the module we're checking
 *
 * Returns:
 *	-1 if the module is in the process of unloading
 *	otherwise the number of references in the kernel to the module
 */
int module_refcount(struct module *mod)
{
	return atomic_read(&mod->refcnt) - MODULE_REF_BASE;
}
EXPORT_SYMBOL(module_refcount);

/* This exists whether we can unload or not */
static void free_module(struct module *mod);

SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
		unsigned int, flags)
{
	struct module *mod;
	char name[MODULE_NAME_LEN];
	int ret, forced = 0;

	if (!capable(CAP_SYS_MODULE) || modules_disabled)
		return -EPERM;

	if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0)
		return -EFAULT;
	name[MODULE_NAME_LEN-1] = '\0';

	if (mutex_lock_interruptible(&module_mutex) != 0)
		return -EINTR;

	mod = find_module(name);
	if (!mod) {
		ret = -ENOENT;
		goto out;
	}

	if (!list_empty(&mod->source_list)) {
		/* Other modules depend on us: get rid of them first. */
		ret = -EWOULDBLOCK;
		goto out;
	}

	/* Doing init or already dying? */
	if (mod->state != MODULE_STATE_LIVE) {
		/* FIXME: if (force), slam module count damn the torpedoes */
		pr_debug("%s already dying\n", mod->name);
		ret = -EBUSY;
		goto out;
	}

	/* If it has an init func, it must have an exit func to unload */
	if (mod->init && !mod->exit) {
		forced = try_force_unload(flags);
		if (!forced) {
			/* This module can't be removed */
			ret = -EBUSY;
			goto out;
		}
	}

	/* Stop the machine so refcounts can't move and disable module. */
	ret = try_stop_module(mod, flags, &forced);
	if (ret != 0)
		goto out;

	mutex_unlock(&module_mutex);
	/* Final destruction now no one is using it. */
	if (mod->exit != NULL)
		mod->exit();
	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_GOING, mod);
	async_synchronize_full();

	/* Store the name of the last unloaded module for diagnostic purposes */
	strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));

	free_module(mod);
	return 0;
out:
	mutex_unlock(&module_mutex);
	return ret;
}

static inline void print_unload_info(struct seq_file *m, struct module *mod)
{
	struct module_use *use;
	int printed_something = 0;

	seq_printf(m, " %i ", module_refcount(mod));

	/*
	 * Always include a trailing , so userspace can differentiate
	 * between this and the old multi-field proc format.
	 */
	list_for_each_entry(use, &mod->source_list, source_list) {
		printed_something = 1;
		seq_printf(m, "%s,", use->source->name);
	}

	if (mod->init != NULL && mod->exit == NULL) {
		printed_something = 1;
		seq_puts(m, "[permanent],");
	}

	if (!printed_something)
		seq_puts(m, "-");
}
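
/*
 * Example of the resulting /proc/modules field: a module pinned by nfsd and
 * lockd might print " 2 nfsd,lockd,", a module with an init function but no
 * exit function prints "[permanent],", and an unused module prints "-".
 */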

void __symbol_put(const char *symbol)
{
	struct module *owner;

	preempt_disable();
	if (!find_symbol(symbol, &owner, NULL, true, false))
		BUG();
	module_put(owner);
	preempt_enable();
}
EXPORT_SYMBOL(__symbol_put);

/* Note this assumes addr is a function, which it currently always is. */
void symbol_put_addr(void *addr)
{
	struct module *modaddr;
	unsigned long a = (unsigned long)dereference_function_descriptor(addr);

	if (core_kernel_text(a))
		return;

	/* module_text_address is safe here: we're supposed to have reference
	 * to module from symbol_get, so it can't go away. */
	modaddr = __module_text_address(a);
	BUG_ON(!modaddr);
	module_put(modaddr);
}
EXPORT_SYMBOL_GPL(symbol_put_addr);

static ssize_t show_refcnt(struct module_attribute *mattr,
			   struct module_kobject *mk, char *buffer)
{
	return sprintf(buffer, "%i\n", module_refcount(mk->mod));
}

static struct module_attribute modinfo_refcnt =
	__ATTR(refcnt, 0444, show_refcnt, NULL);

void __module_get(struct module *module)
{
	if (module) {
		preempt_disable();
		atomic_inc(&module->refcnt);
		trace_module_get(module, _RET_IP_);
		preempt_enable();
	}
}
EXPORT_SYMBOL(__module_get);

bool try_module_get(struct module *module)
{
	bool ret = true;

	if (module) {
		preempt_disable();
		/* Note: here, we can fail to get a reference */
		if (likely(module_is_live(module) &&
			   atomic_inc_not_zero(&module->refcnt) != 0))
			trace_module_get(module, _RET_IP_);
		else
			ret = false;

		preempt_enable();
	}
	return ret;
}
EXPORT_SYMBOL(try_module_get);

void module_put(struct module *module)
{
	int ret;

	if (module) {
		preempt_disable();
		ret = atomic_dec_if_positive(&module->refcnt);
		WARN_ON(ret < 0);	/* Failed to put refcount */
		trace_module_put(module, _RET_IP_);
		preempt_enable();
	}
}
EXPORT_SYMBOL(module_put);

#else /* !CONFIG_MODULE_UNLOAD */
static inline void print_unload_info(struct seq_file *m, struct module *mod)
{
	/* We don't know the usage count, or what modules are using. */
	seq_puts(m, " - -");
}

static inline void module_unload_free(struct module *mod)
{
}

int ref_module(struct module *a, struct module *b)
{
	return strong_try_module_get(b);
}
EXPORT_SYMBOL_GPL(ref_module);

static inline int module_unload_init(struct module *mod)
{
	return 0;
}
#endif /* CONFIG_MODULE_UNLOAD */

static size_t module_flags_taint(struct module *mod, char *buf)
{
	size_t l = 0;

	if (mod->taints & (1 << TAINT_PROPRIETARY_MODULE))
		buf[l++] = 'P';
	if (mod->taints & (1 << TAINT_OOT_MODULE))
		buf[l++] = 'O';
	if (mod->taints & (1 << TAINT_FORCED_MODULE))
		buf[l++] = 'F';
	if (mod->taints & (1 << TAINT_CRAP))
		buf[l++] = 'C';
	if (mod->taints & (1 << TAINT_UNSIGNED_MODULE))
		buf[l++] = 'E';
	/*
	 * TAINT_FORCED_RMMOD: could be added.
	 * TAINT_CPU_OUT_OF_SPEC, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't
	 * apply to modules.
	 */
	return l;
}
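
/*
 * Example: an out-of-tree module that also lacks a valid signature sets both
 * TAINT_OOT_MODULE and TAINT_UNSIGNED_MODULE, so this helper yields "OE",
 * the suffix familiar from oops reports and /sys/module/<name>/taint.
 */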