/*
   Copyright (C) 2002 Richard Henderson
   Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/
#include <linux/export.h>
#include <linux/moduleloader.h>
#include <linux/trace_events.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/elf.h>
#include <linux/proc_fs.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/rcupdate.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/vermagic.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/string.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <linux/license.h>
#include <asm/sections.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/async.h>
#include <linux/percpu.h>
#include <linux/kmemleak.h>
#include <linux/jump_label.h>
#include <linux/pfn.h>
#include <linux/bsearch.h>
#include <uapi/linux/module.h>
#include "module-internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/module.h>

#ifndef ARCH_SHF_SMALL
#define ARCH_SHF_SMALL 0
#endif

/*
 * Modules' sections will be aligned on page boundaries
 * to ensure complete separation of code and data, but
 * only when CONFIG_DEBUG_SET_MODULE_RONX=y
 */
#ifdef CONFIG_DEBUG_SET_MODULE_RONX
# define debug_align(X) ALIGN(X, PAGE_SIZE)
#else
# define debug_align(X) (X)
#endif

Linus Torvalds's avatar
Linus Torvalds committed
83 84 85
/* If this is set, the section belongs in the init part of the module */
#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))

86 87 88 89 90
/*
 * Mutex protects:
 * 1) List of modules (also safely readable with preempt_disable),
 * 2) module_use links,
 * 3) module_addr_min/module_addr_max.
 * (delete and add uses RCU list operations). */
DEFINE_MUTEX(module_mutex);
EXPORT_SYMBOL_GPL(module_mutex);
static LIST_HEAD(modules);
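
/*
 * Illustrative sketch (not part of module.c): readers of the "modules"
 * list either take module_mutex or rely on RCU with preemption disabled,
 * e.g.:
 *
 *	preempt_disable();
 *	list_for_each_entry_rcu(mod, &modules, list) {
 *		if (mod->state == MODULE_STATE_UNFORMED)
 *			continue;
 *		// inspect mod here
 *	}
 *	preempt_enable();
 *
 * Writers (the load and unload paths) hold module_mutex and use the RCU
 * list helpers, so such readers never observe a half-updated list.
 */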
95

96
#ifdef CONFIG_MODULES_TREE_LOOKUP
97

98 99 100 101
/*
 * Use a latched RB-tree for __module_address(); this allows us to use
 * RCU-sched lookups of the address from any context.
 *
102 103 104
 * This is conditional on PERF_EVENTS || TRACING because those can really hit
 * __module_address() hard by doing a lot of stack unwinding; potentially from
 * NMI context.
105 106 107
 */

static __always_inline unsigned long __mod_tree_val(struct latch_tree_node *n)
108
{
109
	struct module_layout *layout = container_of(n, struct module_layout, mtn.node);
110

111
	return (unsigned long)layout->base;
112 113 114 115
}

static __always_inline unsigned long __mod_tree_size(struct latch_tree_node *n)
{
116
	struct module_layout *layout = container_of(n, struct module_layout, mtn.node);
117

118
	return (unsigned long)layout->size;
119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139
}

static __always_inline bool
mod_tree_less(struct latch_tree_node *a, struct latch_tree_node *b)
{
	return __mod_tree_val(a) < __mod_tree_val(b);
}

static __always_inline int
mod_tree_comp(void *key, struct latch_tree_node *n)
{
	unsigned long val = (unsigned long)key;
	unsigned long start, end;

	start = __mod_tree_val(n);
	if (val < start)
		return -1;

	end = start + __mod_tree_size(n);
	if (val >= end)
		return 1;

	return 0;
}

static const struct latch_tree_ops mod_tree_ops = {
	.less = mod_tree_less,
	.comp = mod_tree_comp,
};

static struct mod_tree_root {
	struct latch_tree_root root;
	unsigned long addr_min;
	unsigned long addr_max;
} mod_tree __cacheline_aligned = {
	.addr_min = -1UL,
};

#define module_addr_min mod_tree.addr_min
#define module_addr_max mod_tree.addr_max

static noinline void __mod_tree_insert(struct mod_tree_node *node)
{
	latch_tree_insert(&node->node, &mod_tree.root, &mod_tree_ops);
}

static void __mod_tree_remove(struct mod_tree_node *node)
{
	latch_tree_erase(&node->node, &mod_tree.root, &mod_tree_ops);
}

/*
 * These modifications: insert, remove_init and remove; are serialized by the
 * module_mutex.
 */
static void mod_tree_insert(struct module *mod)
{
	mod->core_layout.mtn.mod = mod;
	mod->init_layout.mtn.mod = mod;

	__mod_tree_insert(&mod->core_layout.mtn);
	if (mod->init_layout.size)
		__mod_tree_insert(&mod->init_layout.mtn);
}

static void mod_tree_remove_init(struct module *mod)
{
	if (mod->init_layout.size)
		__mod_tree_remove(&mod->init_layout.mtn);
}

static void mod_tree_remove(struct module *mod)
{
	__mod_tree_remove(&mod->core_layout.mtn);
	mod_tree_remove_init(mod);
}

static struct module *mod_find(unsigned long addr)
{
	struct latch_tree_node *ltn;

	ltn = latch_tree_find((void *)addr, &mod_tree.root, &mod_tree_ops);
	if (!ltn)
		return NULL;

	return container_of(ltn, struct mod_tree_node, node)->mod;
}
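
/*
 * Illustrative sketch (not part of module.c): because the latch tree is
 * queried with RCU-sched semantics, a lookup only needs preemption
 * disabled, which is cheap enough for profiling/unwinding paths:
 *
 *	struct module *mod;
 *
 *	preempt_disable();
 *	mod = mod_find(addr);	// NULL if addr falls in no module mapping
 *	preempt_enable();
 *
 * Insertions and removals are still serialized by module_mutex, as the
 * comment above mod_tree_insert() notes.
 */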

#else /* MODULES_TREE_LOOKUP */

static unsigned long module_addr_min = -1UL, module_addr_max = 0;

static void mod_tree_insert(struct module *mod) { }
static void mod_tree_remove_init(struct module *mod) { }
static void mod_tree_remove(struct module *mod) { }

static struct module *mod_find(unsigned long addr)
{
	struct module *mod;

	list_for_each_entry_rcu(mod, &modules, list) {
		if (within_module(addr, mod))
			return mod;
	}

	return NULL;
}

#endif /* MODULES_TREE_LOOKUP */

/*
 * Bounds of module text, for speeding up __module_address.
 * Protected by module_mutex.
 */
static void __mod_update_bounds(void *base, unsigned int size)
{
	unsigned long min = (unsigned long)base;
	unsigned long max = min + size;

	if (min < module_addr_min)
		module_addr_min = min;
	if (max > module_addr_max)
		module_addr_max = max;
}

static void mod_update_bounds(struct module *mod)
{
	__mod_update_bounds(mod->core_layout.base, mod->core_layout.size);
	if (mod->init_layout.size)
		__mod_update_bounds(mod->init_layout.base, mod->init_layout.size);
}

#ifdef CONFIG_KGDB_KDB
struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
#endif /* CONFIG_KGDB_KDB */

static void module_assert_mutex(void)
{
	lockdep_assert_held(&module_mutex);
}

static void module_assert_mutex_or_preempt(void)
{
#ifdef CONFIG_LOCKDEP
	if (unlikely(!debug_locks))
		return;

	WARN_ON(!rcu_read_lock_sched_held() &&
		!lockdep_is_held(&module_mutex));
#endif
}

static bool sig_enforce = IS_ENABLED(CONFIG_MODULE_SIG_FORCE);
#ifndef CONFIG_MODULE_SIG_FORCE
module_param(sig_enforce, bool_enable_only, 0644);
#endif /* !CONFIG_MODULE_SIG_FORCE */

/* Block module loading/unloading? */
int modules_disabled = 0;
core_param(nomodule, modules_disabled, bint, 0);

/* Waiting for a module to finish initializing? */
static DECLARE_WAIT_QUEUE_HEAD(module_wq);

static BLOCKING_NOTIFIER_HEAD(module_notify_list);

int register_module_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&module_notify_list, nb);
}
EXPORT_SYMBOL(register_module_notifier);

int unregister_module_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&module_notify_list, nb);
}
EXPORT_SYMBOL(unregister_module_notifier);

struct load_info {
	Elf_Ehdr *hdr;
	unsigned long len;
	Elf_Shdr *sechdrs;
	char *secstrings, *strtab;
	unsigned long symoffs, stroffs;
	struct _ddebug *debug;
	unsigned int num_debug;
	bool sig_ok;
#ifdef CONFIG_KALLSYMS
	unsigned long mod_kallsyms_init_off;
#endif
	struct {
		unsigned int sym, str, mod, vers, info, pcpu;
	} index;
};

/* We require a truly strong try_module_get(): 0 means failure due to
   ongoing or failed initialization etc. */
static inline int strong_try_module_get(struct module *mod)
{
	BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED);
	if (mod && mod->state == MODULE_STATE_COMING)
		return -EBUSY;
	if (try_module_get(mod))
		return 0;
	else
		return -ENOENT;
}

static inline void add_taint_module(struct module *mod, unsigned flag,
				    enum lockdep_ok lockdep_ok)
{
	add_taint(flag, lockdep_ok);
	mod->taints |= (1U << flag);
}

/*
 * A thread that wants to hold a reference to a module only while it
 * is running can call this to safely exit.  nfsd and lockd use this.
 */
void __module_put_and_exit(struct module *mod, long code)
{
	module_put(mod);
	do_exit(code);
}
EXPORT_SYMBOL(__module_put_and_exit);
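
/*
 * Illustrative sketch (not part of module.c): a module's service thread
 * that must not outlive its module text can exit through this helper,
 * e.g. (hypothetical thread function):
 *
 *	static int my_service_thread(void *unused)
 *	{
 *		while (service_should_run())
 *			handle_one_request();
 *		__module_put_and_exit(THIS_MODULE, 0);	// never returns
 *	}
 *
 * service_should_run() and handle_one_request() are made-up names; the
 * real users are the nfsd and lockd service threads mentioned above.
 */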

/* Find a module section: 0 means not found. */
static unsigned int find_sec(const struct load_info *info, const char *name)
{
	unsigned int i;

	for (i = 1; i < info->hdr->e_shnum; i++) {
		Elf_Shdr *shdr = &info->sechdrs[i];
		/* Alloc bit cleared means "ignore it." */
		if ((shdr->sh_flags & SHF_ALLOC)
		    && strcmp(info->secstrings + shdr->sh_name, name) == 0)
			return i;
	}
	return 0;
}

/* Find a module section, or NULL. */
static void *section_addr(const struct load_info *info, const char *name)
{
	/* Section 0 has sh_addr 0. */
	return (void *)info->sechdrs[find_sec(info, name)].sh_addr;
}

/* Find a module section, or NULL.  Fill in number of "objects" in section. */
static void *section_objs(const struct load_info *info,
			  const char *name,
			  size_t object_size,
			  unsigned int *num)
{
	unsigned int sec = find_sec(info, name);

	/* Section 0 has sh_addr 0 and sh_size 0. */
	*num = info->sechdrs[sec].sh_size / object_size;
	return (void *)info->sechdrs[sec].sh_addr;
}
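
/*
 * Illustrative sketch (not part of this excerpt): later loader code uses
 * these helpers roughly like
 *
 *	mod->kp = section_objs(info, "__param",
 *			       sizeof(*mod->kp), &mod->num_kp);
 *
 * i.e. locate the named section, report how many objects of the given
 * size it holds, and return its address (section 0, with sh_addr == 0
 * and sh_size == 0, is returned when the name is not found).
 */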

/* Provided by the linker */
extern const struct kernel_symbol __start___ksymtab[];
extern const struct kernel_symbol __stop___ksymtab[];
extern const struct kernel_symbol __start___ksymtab_gpl[];
extern const struct kernel_symbol __stop___ksymtab_gpl[];
extern const struct kernel_symbol __start___ksymtab_gpl_future[];
extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
extern const unsigned long __start___kcrctab[];
extern const unsigned long __start___kcrctab_gpl[];
extern const unsigned long __start___kcrctab_gpl_future[];
#ifdef CONFIG_UNUSED_SYMBOLS
extern const struct kernel_symbol __start___ksymtab_unused[];
extern const struct kernel_symbol __stop___ksymtab_unused[];
extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
extern const unsigned long __start___kcrctab_unused[];
extern const unsigned long __start___kcrctab_unused_gpl[];
#endif

#ifndef CONFIG_MODVERSIONS
#define symversion(base, idx) NULL
#else
#define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
#endif

static bool each_symbol_in_section(const struct symsearch *arr,
				   unsigned int arrsize,
				   struct module *owner,
				   bool (*fn)(const struct symsearch *syms,
					      struct module *owner,
					      void *data),
				   void *data)
{
	unsigned int j;

	for (j = 0; j < arrsize; j++) {
		if (fn(&arr[j], owner, data))
			return true;
	}

	return false;
}

/* Returns true as soon as fn returns true, otherwise false. */
bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
				    struct module *owner,
				    void *data),
			 void *data)
{
	struct module *mod;
	static const struct symsearch arr[] = {
		{ __start___ksymtab, __stop___ksymtab, __start___kcrctab,
		  NOT_GPL_ONLY, false },
		{ __start___ksymtab_gpl, __stop___ksymtab_gpl,
		  __start___kcrctab_gpl,
		  GPL_ONLY, false },
		{ __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future,
		  __start___kcrctab_gpl_future,
		  WILL_BE_GPL_ONLY, false },
#ifdef CONFIG_UNUSED_SYMBOLS
		{ __start___ksymtab_unused, __stop___ksymtab_unused,
		  __start___kcrctab_unused,
		  NOT_GPL_ONLY, true },
		{ __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl,
		  __start___kcrctab_unused_gpl,
		  GPL_ONLY, true },
#endif
	};

	module_assert_mutex_or_preempt();

	if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
		return true;

	list_for_each_entry_rcu(mod, &modules, list) {
		struct symsearch arr[] = {
			{ mod->syms, mod->syms + mod->num_syms, mod->crcs,
			  NOT_GPL_ONLY, false },
			{ mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
			  mod->gpl_crcs,
			  GPL_ONLY, false },
			{ mod->gpl_future_syms,
			  mod->gpl_future_syms + mod->num_gpl_future_syms,
			  mod->gpl_future_crcs,
			  WILL_BE_GPL_ONLY, false },
#ifdef CONFIG_UNUSED_SYMBOLS
			{ mod->unused_syms,
			  mod->unused_syms + mod->num_unused_syms,
			  mod->unused_crcs,
			  NOT_GPL_ONLY, true },
			{ mod->unused_gpl_syms,
			  mod->unused_gpl_syms + mod->num_unused_gpl_syms,
			  mod->unused_gpl_crcs,
			  GPL_ONLY, true },
#endif
		};

		if (mod->state == MODULE_STATE_UNFORMED)
			continue;

		if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(each_symbol_section);
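
/*
 * Illustrative sketch (not part of module.c): callers supply a callback
 * with the (symsearch, owner, data) signature; returning true stops the
 * walk early.  A hypothetical counter of all exported symbols:
 *
 *	static bool count_syms(const struct symsearch *syms,
 *			       struct module *owner, void *data)
 *	{
 *		*(unsigned long *)data += syms->stop - syms->start;
 *		return false;	// keep walking every table
 *	}
 *
 *	each_symbol_section(count_syms, &total);
 *
 * find_symbol_in_section() below is the real in-tree user.
 */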

struct find_symbol_arg {
	/* Input */
	const char *name;
	bool gplok;
	bool warn;

	/* Output */
	struct module *owner;
	const unsigned long *crc;
	const struct kernel_symbol *sym;
};

static bool check_symbol(const struct symsearch *syms,
				 struct module *owner,
				 unsigned int symnum, void *data)
{
	struct find_symbol_arg *fsa = data;

	if (!fsa->gplok) {
		if (syms->licence == GPL_ONLY)
			return false;
		if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) {
			pr_warn("Symbol %s is being used by a non-GPL module, "
				"which will not be allowed in the future\n",
				fsa->name);
		}
	}

#ifdef CONFIG_UNUSED_SYMBOLS
	if (syms->unused && fsa->warn) {
		pr_warn("Symbol %s is marked as UNUSED, however this module is "
			"using it.\n", fsa->name);
		pr_warn("This symbol will go away in the future.\n");
		pr_warn("Please evaluate if this is the right api to use and "
			"if it really is, submit a report to the linux kernel "
			"mailing list together with submitting your code for "
			"inclusion.\n");
	}
#endif

	fsa->owner = owner;
	fsa->crc = symversion(syms->crcs, symnum);
	fsa->sym = &syms->start[symnum];
	return true;
}

static int cmp_name(const void *va, const void *vb)
{
	const char *a;
	const struct kernel_symbol *b;
	a = va; b = vb;
	return strcmp(a, b->name);
}

static bool find_symbol_in_section(const struct symsearch *syms,
				   struct module *owner,
				   void *data)
{
	struct find_symbol_arg *fsa = data;
	struct kernel_symbol *sym;

	sym = bsearch(fsa->name, syms->start, syms->stop - syms->start,
			sizeof(struct kernel_symbol), cmp_name);

	if (sym != NULL && check_symbol(syms, owner, sym - syms->start, data))
		return true;

	return false;
}

/* Find a symbol and return it, along with (optional) crc and
 * (optional) module which owns it.  Needs preempt disabled or module_mutex. */
const struct kernel_symbol *find_symbol(const char *name,
					struct module **owner,
					const unsigned long **crc,
					bool gplok,
					bool warn)
564 565 566 567 568 569 570
{
	struct find_symbol_arg fsa;

	fsa.name = name;
	fsa.gplok = gplok;
	fsa.warn = warn;

571
	if (each_symbol_section(find_symbol_in_section, &fsa)) {
572 573 574 575
		if (owner)
			*owner = fsa.owner;
		if (crc)
			*crc = fsa.crc;
576
		return fsa.sym;
577 578
	}

579
	pr_debug("Failed to find symbol %s\n", name);
580
	return NULL;
Linus Torvalds's avatar
Linus Torvalds committed
581
}
582
EXPORT_SYMBOL_GPL(find_symbol);
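
/*
 * Illustrative sketch (not part of module.c): a lookup must pin the
 * module list, either via module_mutex or preempt_disable() (as
 * __symbol_put() below does):
 *
 *	const struct kernel_symbol *sym;
 *	const unsigned long *crc;
 *	struct module *owner;
 *
 *	preempt_disable();
 *	sym = find_symbol("printk", &owner, &crc, true, true);
 *	if (sym)
 *		use_symbol(sym->value);	// sym/crc/owner only stable here
 *	preempt_enable();
 *
 * use_symbol() is a placeholder for whatever the caller does next; to use
 * the result after preempt_enable(), take a reference on the owner first.
 */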

/*
 * Search for module by name: must hold module_mutex (or preempt disabled
 * for read-only access).
 */
static struct module *find_module_all(const char *name, size_t len,
				      bool even_unformed)
{
	struct module *mod;

	module_assert_mutex_or_preempt();

	list_for_each_entry(mod, &modules, list) {
		if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (strlen(mod->name) == len && !memcmp(mod->name, name, len))
			return mod;
	}
	return NULL;
}

struct module *find_module(const char *name)
{
	module_assert_mutex();
	return find_module_all(name, strlen(name), false);
}
EXPORT_SYMBOL_GPL(find_module);
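
/*
 * Illustrative sketch (not part of module.c): find_module() hands back a
 * bare pointer, so the caller keeps holding module_mutex while using it:
 *
 *	mutex_lock(&module_mutex);
 *	mod = find_module("ext4");	// hypothetical module name
 *	if (mod)
 *		pr_info("%s found\n", mod->name);
 *	mutex_unlock(&module_mutex);
 */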

#ifdef CONFIG_SMP

static inline void __percpu *mod_percpu(struct module *mod)
{
	return mod->percpu;
}

static int percpu_modalloc(struct module *mod, struct load_info *info)
{
	Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu];
	unsigned long align = pcpusec->sh_addralign;

	if (!pcpusec->sh_size)
		return 0;

	if (align > PAGE_SIZE) {
		pr_warn("%s: per-cpu alignment %li > %li\n",
			mod->name, align, PAGE_SIZE);
		align = PAGE_SIZE;
	}

	mod->percpu = __alloc_reserved_percpu(pcpusec->sh_size, align);
	if (!mod->percpu) {
		pr_warn("%s: Could not allocate %lu bytes percpu data\n",
			mod->name, (unsigned long)pcpusec->sh_size);
		return -ENOMEM;
	}
	mod->percpu_size = pcpusec->sh_size;
	return 0;
}

642
static void percpu_modfree(struct module *mod)
643
{
644
	free_percpu(mod->percpu);
645 646
}

647
static unsigned int find_pcpusec(struct load_info *info)
648
{
649
	return find_sec(info, ".data..percpu");
650 651
}

652 653
static void percpu_modcopy(struct module *mod,
			   const void *from, unsigned long size)
654 655 656 657
{
	int cpu;

	for_each_possible_cpu(cpu)
658
		memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
659 660
}

661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677
/**
 * is_module_percpu_address - test whether address is from module static percpu
 * @addr: address to test
 *
 * Test whether @addr belongs to module static percpu area.
 *
 * RETURNS:
 * %true if @addr is from module static percpu area
 */
bool is_module_percpu_address(unsigned long addr)
{
	struct module *mod;
	unsigned int cpu;

	preempt_disable();

	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (!mod->percpu_size)
			continue;
		for_each_possible_cpu(cpu) {
			void *start = per_cpu_ptr(mod->percpu, cpu);

			if ((void *)addr >= start &&
			    (void *)addr < start + mod->percpu_size) {
				preempt_enable();
				return true;
			}
		}
	}

	preempt_enable();
	return false;
}

#else /* ... !CONFIG_SMP */

static inline void __percpu *mod_percpu(struct module *mod)
{
	return NULL;
}
static int percpu_modalloc(struct module *mod, struct load_info *info)
{
	/* UP modules shouldn't have this section: ENOMEM isn't quite right */
	if (info->sechdrs[info->index.pcpu].sh_size != 0)
		return -ENOMEM;
	return 0;
}
static inline void percpu_modfree(struct module *mod)
{
}
static unsigned int find_pcpusec(struct load_info *info)
{
	return 0;
}
static inline void percpu_modcopy(struct module *mod,
				  const void *from, unsigned long size)
{
	/* pcpusec should be 0, and size of that section should be 0. */
	BUG_ON(size != 0);
}
bool is_module_percpu_address(unsigned long addr)
{
	return false;
}
#endif /* CONFIG_SMP */

#define MODINFO_ATTR(field)	\
static void setup_modinfo_##field(struct module *mod, const char *s)  \
{                                                                     \
	mod->field = kstrdup(s, GFP_KERNEL);                          \
}                                                                     \
static ssize_t show_modinfo_##field(struct module_attribute *mattr,   \
			struct module_kobject *mk, char *buffer)      \
{                                                                     \
	return scnprintf(buffer, PAGE_SIZE, "%s\n", mk->mod->field);  \
}                                                                     \
static int modinfo_##field##_exists(struct module *mod)               \
{                                                                     \
	return mod->field != NULL;                                    \
}                                                                     \
static void free_modinfo_##field(struct module *mod)                  \
{                                                                     \
	kfree(mod->field);                                            \
	mod->field = NULL;                                            \
}                                                                     \
static struct module_attribute modinfo_##field = {                    \
	.attr = { .name = __stringify(field), .mode = 0444 },         \
	.show = show_modinfo_##field,                                 \
	.setup = setup_modinfo_##field,                               \
	.test = modinfo_##field##_exists,                             \
	.free = free_modinfo_##field,                                 \
};

MODINFO_ATTR(version);
MODINFO_ATTR(srcversion);
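
/*
 * Illustrative note (not part of module.c): MODINFO_ATTR(version) above
 * expands into setup_modinfo_version(), show_modinfo_version(),
 * modinfo_version_exists(), free_modinfo_version() and a
 * "struct module_attribute modinfo_version" whose ->show() copies
 * mod->version into the sysfs buffer, backing
 * /sys/module/<name>/version (and likewise for srcversion).
 */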

static char last_unloaded_module[MODULE_NAME_LEN+1];

#ifdef CONFIG_MODULE_UNLOAD

EXPORT_TRACEPOINT_SYMBOL(module_get);

/* MODULE_REF_BASE is the base reference count held by the module loader. */
#define MODULE_REF_BASE	1

/* Init the unload section of the module. */
static int module_unload_init(struct module *mod)
{
	/*
	 * Initialize reference counter to MODULE_REF_BASE.
	 * refcnt == 0 means module is going.
	 */
	atomic_set(&mod->refcnt, MODULE_REF_BASE);

	INIT_LIST_HEAD(&mod->source_list);
	INIT_LIST_HEAD(&mod->target_list);

	/* Hold reference count during initialization. */
	atomic_inc(&mod->refcnt);

	return 0;
}

/* Does a already use b? */
static int already_uses(struct module *a, struct module *b)
{
	struct module_use *use;

	list_for_each_entry(use, &b->source_list, source_list) {
		if (use->source == a) {
			pr_debug("%s uses %s!\n", a->name, b->name);
			return 1;
		}
	}
	pr_debug("%s does not use %s!\n", a->name, b->name);
	return 0;
}

/*
 * Module a uses b
 *  - we add 'a' as a "source", 'b' as a "target" of module use
 *  - the module_use is added to the list of 'b' sources (so
 *    'b' can walk the list to see who sourced them), and of 'a'
 *    targets (so 'a' can see what modules it targets).
 */
static int add_module_usage(struct module *a, struct module *b)
{
	struct module_use *use;

	pr_debug("Allocating new usage for %s.\n", a->name);
	use = kmalloc(sizeof(*use), GFP_ATOMIC);
	if (!use) {
		pr_warn("%s: out of memory loading\n", a->name);
		return -ENOMEM;
	}

	use->source = a;
	use->target = b;
	list_add(&use->source_list, &b->source_list);
	list_add(&use->target_list, &a->target_list);
	return 0;
}

/* Module a uses b: caller must hold module_mutex */
int ref_module(struct module *a, struct module *b)
{
	int err;

	if (b == NULL || already_uses(a, b))
		return 0;

	/* If module isn't available, we fail. */
	err = strong_try_module_get(b);
	if (err)
		return err;

	err = add_module_usage(a, b);
	if (err) {
		module_put(b);
		return err;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ref_module);
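
/*
 * Illustrative sketch (not part of module.c): the symbol-resolution path
 * later in this file effectively does, under module_mutex:
 *
 *	sym = find_symbol(name, &owner, &crc, ..., true);
 *	err = ref_module(mod, owner);	// pin the exporting module
 *
 * so that "owner" cannot be unloaded while "mod" still links against it;
 * module_unload_free(mod) drops these use edges again at unload time.
 */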

/* Clear the unload stuff of the module. */
static void module_unload_free(struct module *mod)
{
	struct module_use *use, *tmp;

	mutex_lock(&module_mutex);
	list_for_each_entry_safe(use, tmp, &mod->target_list, target_list) {
		struct module *i = use->target;
		pr_debug("%s unusing %s\n", mod->name, i->name);
		module_put(i);
		list_del(&use->source_list);
		list_del(&use->target_list);
		kfree(use);
	}
	mutex_unlock(&module_mutex);
}

#ifdef CONFIG_MODULE_FORCE_UNLOAD
static inline int try_force_unload(unsigned int flags)
{
	int ret = (flags & O_TRUNC);
	if (ret)
		add_taint(TAINT_FORCED_RMMOD, LOCKDEP_NOW_UNRELIABLE);
	return ret;
}
#else
static inline int try_force_unload(unsigned int flags)
{
	return 0;
}
#endif /* CONFIG_MODULE_FORCE_UNLOAD */

/* Try to release refcount of module, 0 means success. */
static int try_release_module_ref(struct module *mod)
{
	int ret;

	/* Try to decrement refcnt which we set at loading */
	ret = atomic_sub_return(MODULE_REF_BASE, &mod->refcnt);
	BUG_ON(ret < 0);
	if (ret)
		/* Someone can put this right now, recover with checking */
		ret = atomic_add_unless(&mod->refcnt, MODULE_REF_BASE, 0);

	return ret;
}

static int try_stop_module(struct module *mod, int flags, int *forced)
{
	/* If it's not unused, quit unless we're forcing. */
	if (try_release_module_ref(mod) != 0) {
		*forced = try_force_unload(flags);
		if (!(*forced))
			return -EWOULDBLOCK;
	}

	/* Mark it as dying. */
	mod->state = MODULE_STATE_GOING;

	return 0;
}

/**
 * module_refcount - return the refcount or -1 if unloading
 *
 * @mod:	the module we're checking
 *
 * Returns:
 *	-1 if the module is in the process of unloading
 *	otherwise the number of references in the kernel to the module
 */
int module_refcount(struct module *mod)
{
	return atomic_read(&mod->refcnt) - MODULE_REF_BASE;
}
EXPORT_SYMBOL(module_refcount);
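
/*
 * Illustrative note (not part of module.c): with MODULE_REF_BASE == 1, a
 * module that module_unload_init() has just set up has refcnt == 2 (the
 * base plus the hold taken for initialization), so module_refcount()
 * reports 1.  Once try_release_module_ref() strips the base reference
 * during unload, refcnt can drop to 0 and module_refcount() returns -1.
 */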

/* This exists whether we can unload or not */
static void free_module(struct module *mod);

SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
		unsigned int, flags)
{
	struct module *mod;
	char name[MODULE_NAME_LEN];
	int ret, forced = 0;

	if (!capable(CAP_SYS_MODULE) || modules_disabled)
		return -EPERM;

	if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0)
		return -EFAULT;
	name[MODULE_NAME_LEN-1] = '\0';

	if (mutex_lock_interruptible(&module_mutex) != 0)
		return -EINTR;

	mod = find_module(name);
	if (!mod) {
		ret = -ENOENT;
		goto out;
	}

	if (!list_empty(&mod->source_list)) {
		/* Other modules depend on us: get rid of them first. */
		ret = -EWOULDBLOCK;
		goto out;
	}

	/* Doing init or already dying? */
	if (mod->state != MODULE_STATE_LIVE) {
		/* FIXME: if (force), slam module count damn the torpedoes */
		pr_debug("%s already dying\n", mod->name);
		ret = -EBUSY;
		goto out;
	}

	/* If it has an init func, it must have an exit func to unload */
	if (mod->init && !mod->exit) {
		forced = try_force_unload(flags);
		if (!forced) {
			/* This module can't be removed */
			ret = -EBUSY;
			goto out;
		}
	}

	/* Stop the machine so refcounts can't move and disable module. */
	ret = try_stop_module(mod, flags, &forced);
	if (ret != 0)
		goto out;

	mutex_unlock(&module_mutex);
	/* Final destruction now no one is using it. */
	if (mod->exit != NULL)
		mod->exit();
	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_GOING, mod);
	ftrace_release_mod(mod);

	async_synchronize_full();

	/* Store the name of the last unloaded module for diagnostic purposes */
	strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));

	free_module(mod);
	return 0;
out:
	mutex_unlock(&module_mutex);
	return ret;
}

static inline void print_unload_info(struct seq_file *m, struct module *mod)
{
	struct module_use *use;
	int printed_something = 0;

	seq_printf(m, " %i ", module_refcount(mod));

	/*
	 * Always include a trailing , so userspace can differentiate
	 * between this and the old multi-field proc format.
	 */
	list_for_each_entry(use, &mod->source_list, source_list) {
		printed_something = 1;
		seq_printf(m, "%s,", use->source->name);
	}

	if (mod->init != NULL && mod->exit == NULL) {
		printed_something = 1;
		seq_puts(m, "[permanent],");
	}

	if (!printed_something)
		seq_puts(m, "-");
}

void __symbol_put(const char *symbol)
{
	struct module *owner;

	preempt_disable();
	if (!find_symbol(symbol, &owner, NULL, true, false))
		BUG();
	module_put(owner);
	preempt_enable();
}
EXPORT_SYMBOL(__symbol_put);

/* Note this assumes addr is a function, which it currently always is. */
void symbol_put_addr(void *addr)
{
	struct module *modaddr;
	unsigned long a = (unsigned long)dereference_function_descriptor(addr);

	if (core_kernel_text(a))
		return;

	/*
	 * Even though we hold a reference on the module; we still need to
	 * disable preemption in order to safely traverse the data structure.
	 */
	preempt_disable();
	modaddr = __module_text_address(a);
	BUG_ON(!modaddr);
	module_put(modaddr);
	preempt_enable();
}
EXPORT_SYMBOL_GPL(symbol_put_addr);

static ssize_t show_refcnt(struct module_attribute *mattr,
			   struct module_kobject *mk, char *buffer)
{
	return sprintf(buffer, "%i\n", module_refcount(mk->mod));
}

static struct module_attribute modinfo_refcnt =
	__ATTR(refcnt, 0444, show_refcnt, NULL);

void __module_get(struct module *module)
{
	if (module) {
		preempt_disable();
		atomic_inc(&module->refcnt);
		trace_module_get(module, _RET_IP_);
		preempt_enable();
	}
}
EXPORT_SYMBOL(__module_get);

bool try_module_get(struct module *module)
{
	bool ret = true;

	if (module) {
		preempt_disable();
		/* Note: here, we can fail to get a reference */
		if (likely(module_is_live(module) &&
			   atomic_inc_not_zero(&module->refcnt) != 0))
			trace_module_get(module, _RET_IP_);
		else
			ret = false;

		preempt_enable();
	}
	return ret;
}
EXPORT_SYMBOL(try_module_get);
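
/*
 * Illustrative sketch (not part of module.c): a typical caller pins the
 * providing module around a callback it is about to invoke:
 *
 *	if (!try_module_get(ops->owner))	// fails once the owner is going away
 *		return -ENODEV;
 *	ret = ops->handler(arg);
 *	module_put(ops->owner);
 *
 * "ops", "handler" and "arg" are hypothetical; the pattern is simply
 * "get a strong reference, call into the module, put the reference".
 */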

void module_put(struct module *module)
{
	int ret;

	if (module) {
		preempt_disable();
		ret = atomic_dec_if_positive(&module->refcnt);
		WARN_ON(ret < 0);	/* Failed to put refcount */
		trace_module_put(module, _RET_IP_);
		preempt_enable();
	}
}
EXPORT_SYMBOL(module_put);

#else /* !CONFIG_MODULE_UNLOAD */
static inline void print_unload_info(struct seq_file *m, struct module *mod)
{
	/* We don't know the usage count, or what modules are using. */
	seq_puts(m, " - -");
}

static inline void module_unload_free(struct module *mod)
{
}

int ref_module(struct module *a, struct module *b)
{