/*
   Copyright (C) 2002 Richard Henderson
   Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/
#include <linux/export.h>
#include <linux/moduleloader.h>
#include <linux/ftrace_event.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/elf.h>
#include <linux/proc_fs.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/rcupdate.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/vermagic.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/string.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <linux/license.h>
#include <asm/sections.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/async.h>
#include <linux/percpu.h>
#include <linux/kmemleak.h>
#include <linux/jump_label.h>
#include <linux/pfn.h>
#include <linux/bsearch.h>
#include <uapi/linux/module.h>
#include "module-internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/module.h>

#ifndef ARCH_SHF_SMALL
#define ARCH_SHF_SMALL 0
#endif

/*
 * Modules' sections will be aligned on page boundaries
 * to ensure complete separation of code and data, but
 * only when CONFIG_DEBUG_SET_MODULE_RONX=y
 */
#ifdef CONFIG_DEBUG_SET_MODULE_RONX
# define debug_align(X) ALIGN(X, PAGE_SIZE)
#else
# define debug_align(X) (X)
#endif

/*
 * Given BASE and SIZE this macro calculates the number of pages the
 * memory region occupies
 */
#define MOD_NUMBER_OF_PAGES(BASE, SIZE) (((SIZE) > 0) ?		\
		(PFN_DOWN((unsigned long)(BASE) + (SIZE) - 1) -	\
			 PFN_DOWN((unsigned long)BASE) + 1)	\
		: (0UL))

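/*
 * Worked example (illustrative, assuming 4 KiB pages): for BASE = 0x1000 and
 * SIZE = 0x2001 the last byte of the region lives at 0x3000, so PFN_DOWN()
 * yields frames 3 and 1 and the macro reports 3 - 1 + 1 = 3 pages; a SIZE of
 * zero always yields 0UL.
 */
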
/* If this is set, the section belongs in the init part of the module */
#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))

/*
 * Mutex protects:
 * 1) List of modules (also safely readable with preempt_disable),
 * 2) module_use links,
 * 3) module_addr_min/module_addr_max.
 * (delete and add use RCU list operations). */
DEFINE_MUTEX(module_mutex);
EXPORT_SYMBOL_GPL(module_mutex);
static LIST_HEAD(modules);

#ifdef CONFIG_MODULES_TREE_LOOKUP

/*
 * Use a latched RB-tree for __module_address(); this allows us to use
 * RCU-sched lookups of the address from any context.
 *
 * Because modules have two address ranges: init and core, we need two
 * latch_tree_nodes entries. Therefore we need the back-pointer from
 * mod_tree_node.
 *
 * Because init ranges are short lived we mark them unlikely and have placed
 * them outside the critical cacheline in struct module.
 *
 * This is conditional on PERF_EVENTS || TRACING because those can really hit
 * __module_address() hard by doing a lot of stack unwinding; potentially from
 * NMI context.
 */

static __always_inline unsigned long __mod_tree_val(struct latch_tree_node *n)
{
	struct mod_tree_node *mtn = container_of(n, struct mod_tree_node, node);
	struct module *mod = mtn->mod;

	if (unlikely(mtn == &mod->mtn_init))
		return (unsigned long)mod->module_init;

	return (unsigned long)mod->module_core;
}

static __always_inline unsigned long __mod_tree_size(struct latch_tree_node *n)
{
	struct mod_tree_node *mtn = container_of(n, struct mod_tree_node, node);
	struct module *mod = mtn->mod;

	if (unlikely(mtn == &mod->mtn_init))
		return (unsigned long)mod->init_size;

	return (unsigned long)mod->core_size;
}

static __always_inline bool
mod_tree_less(struct latch_tree_node *a, struct latch_tree_node *b)
{
	return __mod_tree_val(a) < __mod_tree_val(b);
}

static __always_inline int
mod_tree_comp(void *key, struct latch_tree_node *n)
{
	unsigned long val = (unsigned long)key;
	unsigned long start, end;

	start = __mod_tree_val(n);
	if (val < start)
		return -1;

	end = start + __mod_tree_size(n);
	if (val >= end)
		return 1;

	return 0;
}

static const struct latch_tree_ops mod_tree_ops = {
	.less = mod_tree_less,
	.comp = mod_tree_comp,
};

static struct mod_tree_root {
	struct latch_tree_root root;
	unsigned long addr_min;
	unsigned long addr_max;
} mod_tree __cacheline_aligned = {
	.addr_min = -1UL,
};

#define module_addr_min mod_tree.addr_min
#define module_addr_max mod_tree.addr_max

static noinline void __mod_tree_insert(struct mod_tree_node *node)
{
	latch_tree_insert(&node->node, &mod_tree.root, &mod_tree_ops);
}

static void __mod_tree_remove(struct mod_tree_node *node)
{
	latch_tree_erase(&node->node, &mod_tree.root, &mod_tree_ops);
}

/*
 * These modifications: insert, remove_init and remove; are serialized by the
 * module_mutex.
 */
static void mod_tree_insert(struct module *mod)
{
	mod->mtn_core.mod = mod;
	mod->mtn_init.mod = mod;

	__mod_tree_insert(&mod->mtn_core);
	if (mod->init_size)
		__mod_tree_insert(&mod->mtn_init);
}

static void mod_tree_remove_init(struct module *mod)
{
	if (mod->init_size)
		__mod_tree_remove(&mod->mtn_init);
}

static void mod_tree_remove(struct module *mod)
{
	__mod_tree_remove(&mod->mtn_core);
	mod_tree_remove_init(mod);
}

static struct module *mod_find(unsigned long addr)
{
	struct latch_tree_node *ltn;

	ltn = latch_tree_find((void *)addr, &mod_tree.root, &mod_tree_ops);
	if (!ltn)
		return NULL;

	return container_of(ltn, struct mod_tree_node, node)->mod;
}

#else /* MODULES_TREE_LOOKUP */

static unsigned long module_addr_min = -1UL, module_addr_max = 0;

static void mod_tree_insert(struct module *mod) { }
static void mod_tree_remove_init(struct module *mod) { }
static void mod_tree_remove(struct module *mod) { }

static struct module *mod_find(unsigned long addr)
{
	struct module *mod;

	list_for_each_entry_rcu(mod, &modules, list) {
		if (within_module(addr, mod))
			return mod;
	}

	return NULL;
}

#endif /* MODULES_TREE_LOOKUP */

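/*
 * Usage sketch (illustrative): whichever variant of mod_find() gets built,
 * callers are expected to hold module_mutex or an RCU-sched read side,
 * roughly
 *
 *	preempt_disable();
 *	mod = mod_find(addr);
 *	if (mod && mod->state != MODULE_STATE_UNFORMED)
 *		...
 *	preempt_enable();
 *
 * which is the pattern __module_address() follows further down this file.
 */
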
/*
 * Bounds of module text, for speeding up __module_address.
 * Protected by module_mutex.
 */
static void __mod_update_bounds(void *base, unsigned int size)
{
	unsigned long min = (unsigned long)base;
	unsigned long max = min + size;

	if (min < module_addr_min)
		module_addr_min = min;
	if (max > module_addr_max)
		module_addr_max = max;
}

static void mod_update_bounds(struct module *mod)
{
	__mod_update_bounds(mod->module_core, mod->core_size);
	if (mod->init_size)
		__mod_update_bounds(mod->module_init, mod->init_size);
}

#ifdef CONFIG_KGDB_KDB
struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
#endif /* CONFIG_KGDB_KDB */

static void module_assert_mutex(void)
{
	lockdep_assert_held(&module_mutex);
}

static void module_assert_mutex_or_preempt(void)
{
#ifdef CONFIG_LOCKDEP
	if (unlikely(!debug_locks))
		return;

	WARN_ON(!rcu_read_lock_sched_held() &&
		!lockdep_is_held(&module_mutex));
#endif
}

#ifdef CONFIG_MODULE_SIG
#ifdef CONFIG_MODULE_SIG_FORCE
static bool sig_enforce = true;
#else
static bool sig_enforce = false;

static int param_set_bool_enable_only(const char *val,
				      const struct kernel_param *kp)
{
	int err;
	bool test;
	struct kernel_param dummy_kp = *kp;

	dummy_kp.arg = &test;

	err = param_set_bool(val, &dummy_kp);
	if (err)
		return err;

	/* Don't let them unset it once it's set! */
	if (!test && sig_enforce)
		return -EROFS;

	if (test)
		sig_enforce = true;
	return 0;
}

static const struct kernel_param_ops param_ops_bool_enable_only = {
	.flags = KERNEL_PARAM_OPS_FL_NOARG,
	.set = param_set_bool_enable_only,
	.get = param_get_bool,
};
#define param_check_bool_enable_only param_check_bool

module_param(sig_enforce, bool_enable_only, 0644);
#endif /* !CONFIG_MODULE_SIG_FORCE */
#endif /* CONFIG_MODULE_SIG */
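
/*
 * Usage note (illustrative): when built without CONFIG_MODULE_SIG_FORCE the
 * parameter above surfaces as /sys/module/module/parameters/sig_enforce and
 * as module.sig_enforce=1 on the kernel command line; the enable-only ops
 * mean it can be switched on at runtime but never back off.
 */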

/* Block module loading/unloading? */
int modules_disabled = 0;
core_param(nomodule, modules_disabled, bint, 0);

/* Waiting for a module to finish initializing? */
static DECLARE_WAIT_QUEUE_HEAD(module_wq);

static BLOCKING_NOTIFIER_HEAD(module_notify_list);

int register_module_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&module_notify_list, nb);
}
EXPORT_SYMBOL(register_module_notifier);

int unregister_module_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&module_notify_list, nb);
}
EXPORT_SYMBOL(unregister_module_notifier);

struct load_info {
	Elf_Ehdr *hdr;
	unsigned long len;
	Elf_Shdr *sechdrs;
	char *secstrings, *strtab;
	unsigned long symoffs, stroffs;
	struct _ddebug *debug;
	unsigned int num_debug;
	bool sig_ok;
	struct {
		unsigned int sym, str, mod, vers, info, pcpu;
	} index;
};

/* We require a truly strong try_module_get(): 0 means failure due to
   ongoing or failed initialization etc. */
static inline int strong_try_module_get(struct module *mod)
{
	BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED);
	if (mod && mod->state == MODULE_STATE_COMING)
		return -EBUSY;
	if (try_module_get(mod))
		return 0;
	else
		return -ENOENT;
}

static inline void add_taint_module(struct module *mod, unsigned flag,
				    enum lockdep_ok lockdep_ok)
{
	add_taint(flag, lockdep_ok);
	mod->taints |= (1U << flag);
}

/*
 * A thread that wants to hold a reference to a module only while it
 * is running can call this to safely exit.  nfsd and lockd use this.
 */
void __module_put_and_exit(struct module *mod, long code)
{
	module_put(mod);
	do_exit(code);
}
EXPORT_SYMBOL(__module_put_and_exit);

/* Find a module section: 0 means not found. */
static unsigned int find_sec(const struct load_info *info, const char *name)
{
	unsigned int i;

	for (i = 1; i < info->hdr->e_shnum; i++) {
		Elf_Shdr *shdr = &info->sechdrs[i];
		/* Alloc bit cleared means "ignore it." */
		if ((shdr->sh_flags & SHF_ALLOC)
		    && strcmp(info->secstrings + shdr->sh_name, name) == 0)
			return i;
	}
	return 0;
}

/* Find a module section, or NULL. */
static void *section_addr(const struct load_info *info, const char *name)
{
	/* Section 0 has sh_addr 0. */
	return (void *)info->sechdrs[find_sec(info, name)].sh_addr;
}

/* Find a module section, or NULL.  Fill in number of "objects" in section. */
static void *section_objs(const struct load_info *info,
			  const char *name,
			  size_t object_size,
			  unsigned int *num)
{
	unsigned int sec = find_sec(info, name);

	/* Section 0 has sh_addr 0 and sh_size 0. */
	*num = info->sechdrs[sec].sh_size / object_size;
	return (void *)info->sechdrs[sec].sh_addr;
}

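/*
 * Usage sketch (illustrative): later setup code typically pulls whole tables
 * out of named ELF sections in one call, e.g.
 *
 *	mod->kp = section_objs(info, "__param",
 *			       sizeof(*mod->kp), &mod->num_kp);
 *
 * where a missing section simply yields a NULL pointer and a count of 0,
 * because section 0 has sh_addr == 0 and sh_size == 0.
 */
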
/* Provided by the linker */
extern const struct kernel_symbol __start___ksymtab[];
extern const struct kernel_symbol __stop___ksymtab[];
extern const struct kernel_symbol __start___ksymtab_gpl[];
extern const struct kernel_symbol __stop___ksymtab_gpl[];
extern const struct kernel_symbol __start___ksymtab_gpl_future[];
extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
extern const unsigned long __start___kcrctab[];
extern const unsigned long __start___kcrctab_gpl[];
extern const unsigned long __start___kcrctab_gpl_future[];
#ifdef CONFIG_UNUSED_SYMBOLS
extern const struct kernel_symbol __start___ksymtab_unused[];
extern const struct kernel_symbol __stop___ksymtab_unused[];
extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
extern const unsigned long __start___kcrctab_unused[];
extern const unsigned long __start___kcrctab_unused_gpl[];
#endif

#ifndef CONFIG_MODVERSIONS
#define symversion(base, idx) NULL
#else
#define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
#endif

static bool each_symbol_in_section(const struct symsearch *arr,
				   unsigned int arrsize,
				   struct module *owner,
				   bool (*fn)(const struct symsearch *syms,
					      struct module *owner,
					      void *data),
				   void *data)
{
	unsigned int j;

	for (j = 0; j < arrsize; j++) {
		if (fn(&arr[j], owner, data))
			return true;
	}

	return false;
}

/* Returns true as soon as fn returns true, otherwise false. */
bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
				    struct module *owner,
				    void *data),
			 void *data)
{
	struct module *mod;
	static const struct symsearch arr[] = {
		{ __start___ksymtab, __stop___ksymtab, __start___kcrctab,
		  NOT_GPL_ONLY, false },
		{ __start___ksymtab_gpl, __stop___ksymtab_gpl,
		  __start___kcrctab_gpl,
		  GPL_ONLY, false },
		{ __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future,
		  __start___kcrctab_gpl_future,
		  WILL_BE_GPL_ONLY, false },
#ifdef CONFIG_UNUSED_SYMBOLS
		{ __start___ksymtab_unused, __stop___ksymtab_unused,
		  __start___kcrctab_unused,
		  NOT_GPL_ONLY, true },
		{ __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl,
		  __start___kcrctab_unused_gpl,
		  GPL_ONLY, true },
#endif
	};

	module_assert_mutex_or_preempt();

	if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
		return true;

	list_for_each_entry_rcu(mod, &modules, list) {
		struct symsearch arr[] = {
			{ mod->syms, mod->syms + mod->num_syms, mod->crcs,
			  NOT_GPL_ONLY, false },
			{ mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
			  mod->gpl_crcs,
			  GPL_ONLY, false },
			{ mod->gpl_future_syms,
			  mod->gpl_future_syms + mod->num_gpl_future_syms,
			  mod->gpl_future_crcs,
			  WILL_BE_GPL_ONLY, false },
#ifdef CONFIG_UNUSED_SYMBOLS
			{ mod->unused_syms,
			  mod->unused_syms + mod->num_unused_syms,
			  mod->unused_crcs,
			  NOT_GPL_ONLY, true },
			{ mod->unused_gpl_syms,
			  mod->unused_gpl_syms + mod->num_unused_gpl_syms,
			  mod->unused_gpl_crcs,
			  GPL_ONLY, true },
#endif
		};

		if (mod->state == MODULE_STATE_UNFORMED)
			continue;

		if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(each_symbol_section);
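
/*
 * Callback sketch (illustrative): a minimal fn passed to
 * each_symbol_section() inspects one symsearch block and stops the walk by
 * returning true, e.g.
 *
 *	static bool dump_syms(const struct symsearch *syms,
 *			      struct module *owner, void *data)
 *	{
 *		return false;	(keep walking every section)
 *	}
 *
 * find_symbol_in_section() below is the real in-tree user of this hook.
 */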

struct find_symbol_arg {
	/* Input */
	const char *name;
	bool gplok;
	bool warn;

	/* Output */
	struct module *owner;
	const unsigned long *crc;
	const struct kernel_symbol *sym;
};

static bool check_symbol(const struct symsearch *syms,
				 struct module *owner,
				 unsigned int symnum, void *data)
{
	struct find_symbol_arg *fsa = data;

	if (!fsa->gplok) {
		if (syms->licence == GPL_ONLY)
			return false;
		if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) {
			pr_warn("Symbol %s is being used by a non-GPL module, "
				"which will not be allowed in the future\n",
				fsa->name);
		}
	}

#ifdef CONFIG_UNUSED_SYMBOLS
	if (syms->unused && fsa->warn) {
		pr_warn("Symbol %s is marked as UNUSED, however this module is "
			"using it.\n", fsa->name);
		pr_warn("This symbol will go away in the future.\n");
		pr_warn("Please evaluate if this is the right api to use and "
			"if it really is, submit a report to the linux kernel "
			"mailing list together with submitting your code for "
			"inclusion.\n");
	}
#endif

	fsa->owner = owner;
	fsa->crc = symversion(syms->crcs, symnum);
	fsa->sym = &syms->start[symnum];
	return true;
}

static int cmp_name(const void *va, const void *vb)
{
	const char *a;
	const struct kernel_symbol *b;
	a = va; b = vb;
	return strcmp(a, b->name);
}

static bool find_symbol_in_section(const struct symsearch *syms,
				   struct module *owner,
				   void *data)
{
	struct find_symbol_arg *fsa = data;
	struct kernel_symbol *sym;

	sym = bsearch(fsa->name, syms->start, syms->stop - syms->start,
			sizeof(struct kernel_symbol), cmp_name);

	if (sym != NULL && check_symbol(syms, owner, sym - syms->start, data))
		return true;

	return false;
}

/* Find a symbol and return it, along with (optional) crc and
 * (optional) module which owns it.  Needs preempt disabled or module_mutex. */
const struct kernel_symbol *find_symbol(const char *name,
					struct module **owner,
					const unsigned long **crc,
					bool gplok,
					bool warn)
{
	struct find_symbol_arg fsa;

	fsa.name = name;
	fsa.gplok = gplok;
	fsa.warn = warn;

	if (each_symbol_section(find_symbol_in_section, &fsa)) {
		if (owner)
			*owner = fsa.owner;
		if (crc)
			*crc = fsa.crc;
		return fsa.sym;
	}

	pr_debug("Failed to find symbol %s\n", name);
	return NULL;
}
EXPORT_SYMBOL_GPL(find_symbol);
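
/*
 * Usage sketch (illustrative): resolving an undefined symbol against the
 * exported tables looks roughly like
 *
 *	const struct kernel_symbol *ksym;
 *	struct module *owner;
 *	const unsigned long *crc;
 *
 *	preempt_disable();
 *	ksym = find_symbol("printk", &owner, &crc, true, true);
 *	if (ksym)
 *		... record ksym->value and take a reference on owner ...
 *	preempt_enable();
 */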

/* Search for module by name: must hold module_mutex. */
static struct module *find_module_all(const char *name, size_t len,
				      bool even_unformed)
{
	struct module *mod;

	module_assert_mutex();

	list_for_each_entry(mod, &modules, list) {
		if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (strlen(mod->name) == len && !memcmp(mod->name, name, len))
			return mod;
	}
	return NULL;
}

struct module *find_module(const char *name)
{
	return find_module_all(name, strlen(name), false);
}
EXPORT_SYMBOL_GPL(find_module);

#ifdef CONFIG_SMP

static inline void __percpu *mod_percpu(struct module *mod)
{
	return mod->percpu;
}

static int percpu_modalloc(struct module *mod, struct load_info *info)
{
	Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu];
	unsigned long align = pcpusec->sh_addralign;

	if (!pcpusec->sh_size)
		return 0;

	if (align > PAGE_SIZE) {
		pr_warn("%s: per-cpu alignment %li > %li\n",
			mod->name, align, PAGE_SIZE);
		align = PAGE_SIZE;
	}

	mod->percpu = __alloc_reserved_percpu(pcpusec->sh_size, align);
	if (!mod->percpu) {
		pr_warn("%s: Could not allocate %lu bytes percpu data\n",
			mod->name, (unsigned long)pcpusec->sh_size);
		return -ENOMEM;
	}
	mod->percpu_size = pcpusec->sh_size;
	return 0;
}

static void percpu_modfree(struct module *mod)
{
	free_percpu(mod->percpu);
}

static unsigned int find_pcpusec(struct load_info *info)
{
	return find_sec(info, ".data..percpu");
}

static void percpu_modcopy(struct module *mod,
			   const void *from, unsigned long size)
{
	int cpu;

	for_each_possible_cpu(cpu)
		memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
}

/**
 * is_module_percpu_address - test whether address is from module static percpu
 * @addr: address to test
 *
 * Test whether @addr belongs to module static percpu area.
 *
 * RETURNS:
 * %true if @addr is from module static percpu area
 */
bool is_module_percpu_address(unsigned long addr)
{
	struct module *mod;
	unsigned int cpu;

	preempt_disable();

	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (!mod->percpu_size)
			continue;
		for_each_possible_cpu(cpu) {
			void *start = per_cpu_ptr(mod->percpu, cpu);

			if ((void *)addr >= start &&
			    (void *)addr < start + mod->percpu_size) {
				preempt_enable();
				return true;
			}
		}
	}

	preempt_enable();
	return false;
}

#else /* ... !CONFIG_SMP */

static inline void __percpu *mod_percpu(struct module *mod)
{
	return NULL;
}
static int percpu_modalloc(struct module *mod, struct load_info *info)
{
	/* UP modules shouldn't have this section: ENOMEM isn't quite right */
	if (info->sechdrs[info->index.pcpu].sh_size != 0)
		return -ENOMEM;
	return 0;
}
static inline void percpu_modfree(struct module *mod)
{
}
static unsigned int find_pcpusec(struct load_info *info)
{
	return 0;
}
static inline void percpu_modcopy(struct module *mod,
				  const void *from, unsigned long size)
{
	/* pcpusec should be 0, and size of that section should be 0. */
	BUG_ON(size != 0);
}
bool is_module_percpu_address(unsigned long addr)
{
	return false;
}

#endif /* CONFIG_SMP */

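/*
 * Note (illustrative): is_module_percpu_address() lets address classifiers
 * such as lockdep's static-object check treat a module's per-cpu area the
 * same way as the kernel's own static per-cpu region; on !SMP builds the
 * stub above simply reports false.
 */
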
#define MODINFO_ATTR(field)	\
static void setup_modinfo_##field(struct module *mod, const char *s)  \
{                                                                     \
	mod->field = kstrdup(s, GFP_KERNEL);                          \
}                                                                     \
static ssize_t show_modinfo_##field(struct module_attribute *mattr,   \
			struct module_kobject *mk, char *buffer)      \
{                                                                     \
	return scnprintf(buffer, PAGE_SIZE, "%s\n", mk->mod->field);  \
}                                                                     \
static int modinfo_##field##_exists(struct module *mod)               \
{                                                                     \
	return mod->field != NULL;                                    \
}                                                                     \
static void free_modinfo_##field(struct module *mod)                  \
{                                                                     \
	kfree(mod->field);                                            \
	mod->field = NULL;                                            \
}                                                                     \
static struct module_attribute modinfo_##field = {                    \
	.attr = { .name = __stringify(field), .mode = 0444 },         \
	.show = show_modinfo_##field,                                 \
	.setup = setup_modinfo_##field,                               \
	.test = modinfo_##field##_exists,                             \
	.free = free_modinfo_##field,                                 \
};

MODINFO_ATTR(version);
MODINFO_ATTR(srcversion);

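/*
 * Expansion sketch (illustrative): MODINFO_ATTR(version) generates
 * setup_modinfo_version(), show_modinfo_version(), modinfo_version_exists()
 * and free_modinfo_version() plus a "version" module_attribute, which is
 * what ultimately backs /sys/module/<name>/version (and likewise
 * srcversion) when the corresponding .modinfo strings are present.
 */
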
static char last_unloaded_module[MODULE_NAME_LEN+1];

#ifdef CONFIG_MODULE_UNLOAD

EXPORT_TRACEPOINT_SYMBOL(module_get);

/* MODULE_REF_BASE is the base reference count by kmodule loader. */
#define MODULE_REF_BASE	1

/* Init the unload section of the module. */
static int module_unload_init(struct module *mod)
{
	/*
	 * Initialize reference counter to MODULE_REF_BASE.
	 * refcnt == 0 means module is going.
	 */
	atomic_set(&mod->refcnt, MODULE_REF_BASE);

	INIT_LIST_HEAD(&mod->source_list);
	INIT_LIST_HEAD(&mod->target_list);

	/* Hold reference count during initialization. */
	atomic_inc(&mod->refcnt);

	return 0;
}

/* Does a already use b? */
static int already_uses(struct module *a, struct module *b)
{
	struct module_use *use;

	list_for_each_entry(use, &b->source_list, source_list) {
		if (use->source == a) {
			pr_debug("%s uses %s!\n", a->name, b->name);
			return 1;
		}
	}
	pr_debug("%s does not use %s!\n", a->name, b->name);
	return 0;
}

/*
 * Module a uses b
 *  - we add 'a' as a "source", 'b' as a "target" of module use
 *  - the module_use is added to the list of 'b' sources (so
 *    'b' can walk the list to see who sourced them), and of 'a'
 *    targets (so 'a' can see what modules it targets).
 */
static int add_module_usage(struct module *a, struct module *b)
{
	struct module_use *use;

	pr_debug("Allocating new usage for %s.\n", a->name);
	use = kmalloc(sizeof(*use), GFP_ATOMIC);
	if (!use) {
		pr_warn("%s: out of memory loading\n", a->name);
		return -ENOMEM;
	}

	use->source = a;
	use->target = b;
	list_add(&use->source_list, &b->source_list);
	list_add(&use->target_list, &a->target_list);
	return 0;
}

/* Module a uses b: caller needs module_mutex() */
int ref_module(struct module *a, struct module *b)
{
	int err;

	if (b == NULL || already_uses(a, b))
		return 0;

	/* If module isn't available, we fail. */
	err = strong_try_module_get(b);
	if (err)
		return err;

	err = add_module_usage(a, b);
	if (err) {
		module_put(b);
		return err;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ref_module);

/* Clear the unload stuff of the module. */
static void module_unload_free(struct module *mod)
{
	struct module_use *use, *tmp;

	mutex_lock(&module_mutex);
	list_for_each_entry_safe(use, tmp, &mod->target_list, target_list) {
		struct module *i = use->target;
		pr_debug("%s unusing %s\n", mod->name, i->name);
		module_put(i);
		list_del(&use->source_list);
		list_del(&use->target_list);
		kfree(use);
	}
	mutex_unlock(&module_mutex);
}

#ifdef CONFIG_MODULE_FORCE_UNLOAD
static inline int try_force_unload(unsigned int flags)
{
	int ret = (flags & O_TRUNC);
	if (ret)
		add_taint(TAINT_FORCED_RMMOD, LOCKDEP_NOW_UNRELIABLE);
	return ret;
}
#else
static inline int try_force_unload(unsigned int flags)
{
	return 0;
}
#endif /* CONFIG_MODULE_FORCE_UNLOAD */

/* Try to release refcount of module, 0 means success. */
static int try_release_module_ref(struct module *mod)
{
	int ret;

	/* Try to decrement refcnt which we set at loading */
	ret = atomic_sub_return(MODULE_REF_BASE, &mod->refcnt);
	BUG_ON(ret < 0);
	if (ret)
		/* Someone can put this right now, recover with checking */
		ret = atomic_add_unless(&mod->refcnt, MODULE_REF_BASE, 0);

	return ret;
}

static int try_stop_module(struct module *mod, int flags, int *forced)
{
	/* If it's not unused, quit unless we're forcing. */
	if (try_release_module_ref(mod) != 0) {
		*forced = try_force_unload(flags);
		if (!(*forced))
			return -EWOULDBLOCK;
	}

	/* Mark it as dying. */
	mod->state = MODULE_STATE_GOING;

	return 0;
}

/**
 * module_refcount - return the refcount or -1 if unloading
 *
 * @mod:	the module we're checking
 *
 * Returns:
 *	-1 if the module is in the process of unloading
 *	otherwise the number of references in the kernel to the module
 */
int module_refcount(struct module *mod)
{
	return atomic_read(&mod->refcnt) - MODULE_REF_BASE;
}
EXPORT_SYMBOL(module_refcount);
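
/*
 * Worked example (illustrative): a freshly loaded, unused module sits at
 * refcnt == MODULE_REF_BASE, so module_refcount() reports 0; two users give
 * 2; once try_release_module_ref() has stripped the base reference the
 * counter reads 0 and module_refcount() reports -1, i.e. "unloading".
 */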

/* This exists whether we can unload or not */
static void free_module(struct module *mod);

SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
		unsigned int, flags)
{
	struct module *mod;
	char name[MODULE_NAME_LEN];
	int ret, forced = 0;

	if (!capable(CAP_SYS_MODULE) || modules_disabled)
		return -EPERM;

	if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0)
		return -EFAULT;
	name[MODULE_NAME_LEN-1] = '\0';

	if (mutex_lock_interruptible(&module_mutex) != 0)
		return -EINTR;

	mod = find_module(name);
	if (!mod) {
		ret = -ENOENT;
		goto out;
	}

	if (!list_empty(&mod->source_list)) {
		/* Other modules depend on us: get rid of them first. */
		ret = -EWOULDBLOCK;
		goto out;
	}

	/* Doing init or already dying? */
	if (mod->state != MODULE_STATE_LIVE) {
		/* FIXME: if (force), slam module count damn the torpedoes */
		pr_debug("%s already dying\n", mod->name);
		ret = -EBUSY;
		goto out;
	}

	/* If it has an init func, it must have an exit func to unload */
	if (mod->init && !mod->exit) {
		forced = try_force_unload(flags);
		if (!forced) {
			/* This module can't be removed */
			ret = -EBUSY;
			goto out;
		}
	}

	/* Stop the machine so refcounts can't move and disable module. */
	ret = try_stop_module(mod, flags, &forced);
	if (ret != 0)
		goto out;

	mutex_unlock(&module_mutex);
	/* Final destruction now no one is using it. */
	if (mod->exit != NULL)
		mod->exit();
	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_GOING, mod);
	async_synchronize_full();

	/* Store the name of the last unloaded module for diagnostic purposes */
	strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));

	free_module(mod);
	return 0;
out:
	mutex_unlock(&module_mutex);
	return ret;
}

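/*
 * Usage note (illustrative): userspace rmmod normally calls
 * delete_module(name, O_NONBLOCK); rmmod --force adds O_TRUNC, which only
 * has an effect when CONFIG_MODULE_FORCE_UNLOAD lets try_force_unload()
 * honour it, at the price of tainting the kernel with TAINT_FORCED_RMMOD.
 */
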
static inline void print_unload_info(struct seq_file *m, struct module *mod)
{
	struct module_use *use;
	int printed_something = 0;

	seq_printf(m, " %i ", module_refcount(mod));

	/*
	 * Always include a trailing , so userspace can differentiate
	 * between this and the old multi-field proc format.
	 */
	list_for_each_entry(use, &mod->source_list, source_list) {
		printed_something = 1;
		seq_printf(m, "%s,", use->source->name);
	}

	if (mod->init != NULL && mod->exit == NULL) {
		printed_something = 1;
		seq_puts(m, "[permanent],");
	}

	if (!printed_something)
		seq_puts(m, "-");
}

void __symbol_put(const char *symbol)
{
	struct module *owner;

	preempt_disable();
	if (!find_symbol(symbol, &owner, NULL, true, false))
		BUG();
	module_put(owner);
	preempt_enable();
}
EXPORT_SYMBOL(__symbol_put);

/* Note this assumes addr is a function, which it currently always is. */
void symbol_put_addr(void *addr)
{
	struct module *modaddr;
	unsigned long a = (unsigned long)dereference_function_descriptor(addr);

	if (core_kernel_text(a))
		return;

	/* module_text_address is safe here: we're supposed to have reference
	 * to module from symbol_get, so it can't go away. */
	modaddr = __module_text_address(a);
	BUG_ON(!modaddr);
	module_put(modaddr);
}
EXPORT_SYMBOL_GPL(symbol_put_addr);

static ssize_t show_refcnt(struct module_attribute *mattr,
			   struct module_kobject *mk, char *buffer)
{
	return sprintf(buffer, "%i\n", module_refcount(mk->mod));
}

static struct module_attribute modinfo_refcnt =
	__ATTR(refcnt, 0444, show_refcnt, NULL);

void __module_get(struct module *module)
{
	if (module) {
		preempt_disable();
		atomic_inc(&module->refcnt);
		trace_module_get(module, _RET_IP_);
		preempt_enable();
	}
}
EXPORT_SYMBOL(__module_get);

bool try_module_get(struct module *module)
{
	bool ret = true;

	if (module) {
		preempt_disable();
		/* Note: here, we can fail to get a reference */
		if (likely(module_is_live(module) &&
			   atomic_inc_not_zero(&module->refcnt) != 0))
			trace_module_get(module, _RET_IP_);
		else
			ret = false;

		preempt_enable();
	}
	return ret;
}
EXPORT_SYMBOL(try_module_get);

void module_put(struct module *module)
{
	int ret;

	if (module) {
		preempt_disable();
		ret = atomic_dec_if_positive(&module->refcnt);
		WARN_ON(ret < 0);	/* Failed to put refcount */
		trace_module_put(module, _RET_IP_);
		preempt_enable();
	}
}
EXPORT_SYMBOL(module_put);

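/*
 * Usage sketch (illustrative, "ops->owner" standing in for whatever
 * THIS_MODULE pointer a callee registered): pinning a provider module
 * around an operation pairs the calls like
 *
 *	if (!try_module_get(ops->owner))
 *		return -ENODEV;
 *	... call into ops ...
 *	module_put(ops->owner);
 *
 * __module_get() is only for callers that already know the module cannot
 * be on its way out.
 */
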
#else /* !CONFIG_MODULE_UNLOAD */
static inline void print_unload_info(struct seq_file *m, struct module *mod)
{
	/* We don't know the usage count, or what modules are using. */
	seq_puts(m, " - -");
}

static inline void module_unload_free(struct module *mod)
{
}

int ref_module(struct module *a, struct module *b)
{
	return strong_try_module_get(b);
}
EXPORT_SYMBOL_GPL(ref_module);

static inline int module_unload_init(struct module *mod)
{
	return 0;
}
#endif /* CONFIG_MODULE_UNLOAD */

static size_t module_flags_taint(struct module *mod, char *buf)
{
	size_t l = 0;

	if (mod->taints & (1 << TAINT_PROPRIETARY_MODULE))
		buf[l++] = 'P';
	if (mod->taints & (1 << TAINT_OOT_MODULE))
		buf[l++] = 'O';
	if