/*
 * device_cgroup.c - device cgroup subsystem
 *
 * Copyright 2007 IBM Corp
 */

#include <linux/device_cgroup.h>
#include <linux/cgroup.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>

#define ACC_MKNOD 1
#define ACC_READ  2
#define ACC_WRITE 4
#define ACC_MASK (ACC_MKNOD | ACC_READ | ACC_WRITE)

#define DEV_BLOCK 1
#define DEV_CHAR  2
#define DEV_ALL   4  /* this represents all devices */

static DEFINE_MUTEX(devcgroup_mutex);

/*
 * exception list locking rules:
 * hold devcgroup_mutex for update/read.
 * hold rcu_read_lock() for read.
 */
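/*
 * A minimal sketch of the two access patterns implied by those rules,
 * mirroring what may_access() and dev_exception_add() below actually do:
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(ex, &dev_cgroup->exceptions, list)
 *		...;				(read side, no mutex needed)
 *	rcu_read_unlock();
 *
 *	mutex_lock(&devcgroup_mutex);
 *	list_add_tail_rcu(&excopy->list, &dev_cgroup->exceptions);
 *	mutex_unlock(&devcgroup_mutex);		(update side)
 */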

struct dev_exception_item {
	u32 major, minor;
	short type;
	short access;
	struct list_head list;
	struct rcu_head rcu;
};

struct dev_cgroup {
	struct cgroup_subsys_state css;
	struct list_head exceptions;
	enum {
		DEVCG_DEFAULT_ALLOW,
		DEVCG_DEFAULT_DENY,
	} behavior;
};
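/*
 * For illustration: a dev_cgroup with behavior == DEVCG_DEFAULT_DENY and a
 * single exception { .type = DEV_CHAR, .major = 1, .minor = 3,
 * .access = ACC_READ | ACC_WRITE | ACC_MKNOD } denies every device except
 * /dev/null (char 1:3); with DEVCG_DEFAULT_ALLOW the same exception would
 * instead carve /dev/null out of an otherwise unrestricted cgroup.
 */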

static inline struct dev_cgroup *css_to_devcgroup(struct cgroup_subsys_state *s)
{
	return container_of(s, struct dev_cgroup, css);
}

static inline struct dev_cgroup *cgroup_to_devcgroup(struct cgroup *cgroup)
{
	return css_to_devcgroup(cgroup_subsys_state(cgroup, devices_subsys_id));
}

static inline struct dev_cgroup *task_devcgroup(struct task_struct *task)
{
	return css_to_devcgroup(task_subsys_state(task, devices_subsys_id));
}

struct cgroup_subsys devices_subsys;

static int devcgroup_can_attach(struct cgroup *new_cgrp,
				struct cgroup_taskset *set)
{
	struct task_struct *task = cgroup_taskset_first(set);

	if (current != task && !capable(CAP_SYS_ADMIN))
		return -EPERM;
	return 0;
}

/*
 * called under devcgroup_mutex
 */
static int dev_exceptions_copy(struct list_head *dest, struct list_head *orig)
{
	struct dev_exception_item *ex, *tmp, *new;

	lockdep_assert_held(&devcgroup_mutex);

	list_for_each_entry(ex, orig, list) {
		new = kmemdup(ex, sizeof(*ex), GFP_KERNEL);
		if (!new)
			goto free_and_exit;
		list_add_tail(&new->list, dest);
	}

	return 0;

free_and_exit:
	list_for_each_entry_safe(ex, tmp, dest, list) {
		list_del(&ex->list);
		kfree(ex);
	}
	return -ENOMEM;
}

/*
 * called under devcgroup_mutex
 */
static int dev_exception_add(struct dev_cgroup *dev_cgroup,
			     struct dev_exception_item *ex)
{
	struct dev_exception_item *excopy, *walk;

	lockdep_assert_held(&devcgroup_mutex);

	excopy = kmemdup(ex, sizeof(*ex), GFP_KERNEL);
	if (!excopy)
		return -ENOMEM;

	list_for_each_entry(walk, &dev_cgroup->exceptions, list) {
		if (walk->type != ex->type)
			continue;
		if (walk->major != ex->major)
			continue;
		if (walk->minor != ex->minor)
			continue;

		walk->access |= ex->access;
		kfree(excopy);
		excopy = NULL;
	}

	if (excopy != NULL)
		list_add_tail_rcu(&excopy->list, &dev_cgroup->exceptions);
	return 0;
}
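/*
 * Worked example of the merge above: if the list already holds
 * { .type = DEV_CHAR, .major = 1, .minor = 3, .access = ACC_READ } and a
 * new exception "c 1:3 w" is added, no second entry is created; the
 * existing entry's access simply becomes ACC_READ | ACC_WRITE.
 */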

/*
 * called under devcgroup_mutex
 */
static void dev_exception_rm(struct dev_cgroup *dev_cgroup,
			     struct dev_exception_item *ex)
{
	struct dev_exception_item *walk, *tmp;

	lockdep_assert_held(&devcgroup_mutex);

	list_for_each_entry_safe(walk, tmp, &dev_cgroup->exceptions, list) {
		if (walk->type != ex->type)
			continue;
		if (walk->major != ex->major)
			continue;
		if (walk->minor != ex->minor)
			continue;

		walk->access &= ~ex->access;
		if (!walk->access) {
			list_del_rcu(&walk->list);
			kfree_rcu(walk, rcu);
		}
	}
}

/**
 * dev_exception_clean - frees all entries of the exception list
 * @dev_cgroup: dev_cgroup with the exception list to be cleaned
 *
 * called under devcgroup_mutex
 */
static void dev_exception_clean(struct dev_cgroup *dev_cgroup)
{
	struct dev_exception_item *ex, *tmp;

	lockdep_assert_held(&devcgroup_mutex);

	list_for_each_entry_safe(ex, tmp, &dev_cgroup->exceptions, list) {
		list_del_rcu(&ex->list);
		kfree_rcu(ex, rcu);
	}
}

/*
 * called from kernel/cgroup.c with cgroup_lock() held.
 */
static struct cgroup_subsys_state *devcgroup_create(struct cgroup *cgroup)
{
	struct dev_cgroup *dev_cgroup, *parent_dev_cgroup;
	struct cgroup *parent_cgroup;
	int ret;

	dev_cgroup = kzalloc(sizeof(*dev_cgroup), GFP_KERNEL);
	if (!dev_cgroup)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&dev_cgroup->exceptions);
	parent_cgroup = cgroup->parent;

	if (parent_cgroup == NULL)
		dev_cgroup->behavior = DEVCG_DEFAULT_ALLOW;
	else {
		parent_dev_cgroup = cgroup_to_devcgroup(parent_cgroup);
		mutex_lock(&devcgroup_mutex);
		ret = dev_exceptions_copy(&dev_cgroup->exceptions,
					  &parent_dev_cgroup->exceptions);
		dev_cgroup->behavior = parent_dev_cgroup->behavior;
		mutex_unlock(&devcgroup_mutex);
		if (ret) {
			kfree(dev_cgroup);
			return ERR_PTR(ret);
		}
	}

	return &dev_cgroup->css;
}

static void devcgroup_destroy(struct cgroup *cgroup)
{
	struct dev_cgroup *dev_cgroup;

	dev_cgroup = cgroup_to_devcgroup(cgroup);
	dev_exception_clean(dev_cgroup);
	kfree(dev_cgroup);
}

#define DEVCG_ALLOW 1
#define DEVCG_DENY 2
#define DEVCG_LIST 3

#define MAJMINLEN 13
#define ACCLEN 4

static void set_access(char *acc, short access)
{
	int idx = 0;
	memset(acc, 0, ACCLEN);
	if (access & ACC_READ)
		acc[idx++] = 'r';
	if (access & ACC_WRITE)
		acc[idx++] = 'w';
	if (access & ACC_MKNOD)
		acc[idx++] = 'm';
}

static char type_to_char(short type)
{
	if (type == DEV_ALL)
		return 'a';
	if (type == DEV_CHAR)
		return 'c';
	if (type == DEV_BLOCK)
		return 'b';
	return 'X';
}

static void set_majmin(char *str, unsigned m)
{
	if (m == ~0)
		strcpy(str, "*");
	else
		sprintf(str, "%u", m);
}

static int devcgroup_seq_read(struct cgroup *cgroup, struct cftype *cft,
				struct seq_file *m)
{
	struct dev_cgroup *devcgroup = cgroup_to_devcgroup(cgroup);
	struct dev_exception_item *ex;
	char maj[MAJMINLEN], min[MAJMINLEN], acc[ACCLEN];

	rcu_read_lock();
	/*
	 * To preserve compatibility:
	 * - Only show the "all devices" entry when the default policy is to allow
	 * - List the exceptions in case the default policy is to deny
	 * This way, the file still reads as a "whitelist of devices"
	 */
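	/*
	 * Example devices.list output (the format produced by the
	 * seq_printf() calls below):
	 *
	 *	a *:* rwm	default allow, exceptions not shown
	 *	c 1:3 rwm	default deny, one exception per line
	 *	b 8:* r
	 */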
	if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
		set_access(acc, ACC_MASK);
		set_majmin(maj, ~0);
		set_majmin(min, ~0);
		seq_printf(m, "%c %s:%s %s\n", type_to_char(DEV_ALL),
			   maj, min, acc);
	} else {
		list_for_each_entry_rcu(ex, &devcgroup->exceptions, list) {
			set_access(acc, ex->access);
			set_majmin(maj, ex->major);
			set_majmin(min, ex->minor);
			seq_printf(m, "%c %s:%s %s\n", type_to_char(ex->type),
				   maj, min, acc);
		}
	}
	rcu_read_unlock();

	return 0;
}

/**
 * may_access - verifies if a new exception is part of what is allowed
 *		by a dev cgroup based on the default policy +
 *		exceptions. This is used to make sure a child cgroup
 *		won't have more privileges than its parent or to
 *		verify if a certain access is allowed.
 * @dev_cgroup: dev cgroup to be tested against
 * @refex: new exception
 */
static int may_access(struct dev_cgroup *dev_cgroup,
		      struct dev_exception_item *refex)
{
	struct dev_exception_item *ex;
	bool match = false;

	rcu_lockdep_assert(rcu_read_lock_held() ||
			   lockdep_is_held(&devcgroup_mutex),
			   "device_cgroup::may_access() called without proper synchronization");

	list_for_each_entry_rcu(ex, &dev_cgroup->exceptions, list) {
		if ((refex->type & DEV_BLOCK) && !(ex->type & DEV_BLOCK))
			continue;
		if ((refex->type & DEV_CHAR) && !(ex->type & DEV_CHAR))
			continue;
		if (ex->major != ~0 && ex->major != refex->major)
			continue;
		if (ex->minor != ~0 && ex->minor != refex->minor)
			continue;
		if (refex->access & (~ex->access))
			continue;
		match = true;
		break;
	}

	/*
	 * In two cases we'll consider this new exception valid:
	 * - the dev cgroup has its default policy to allow + exception list:
	 *   the new exception should *not* match any of the exceptions
	 *   (behavior == DEVCG_DEFAULT_ALLOW, !match)
	 * - the dev cgroup has its default policy to deny + exception list:
	 *   the new exception *should* match the exceptions
	 *   (behavior == DEVCG_DEFAULT_DENY, match)
	 */
	if ((dev_cgroup->behavior == DEVCG_DEFAULT_DENY) == match)
		return 1;
	return 0;
}
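/*
 * Concrete example of the check above: for a cgroup with
 * behavior == DEVCG_DEFAULT_DENY and one exception "c 1:3 r", a refex of
 * "c 1:3 r" matches, so access is granted; a refex of "c 1:3 w" fails the
 * access-bits test (write is not in the exception) and is refused.
 */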

/*
 * parent_has_perm:
 * when adding a new allow rule to a device exception list, the rule
 * must be allowed in the parent device
 */
static int parent_has_perm(struct dev_cgroup *childcg,
				  struct dev_exception_item *ex)
{
	struct cgroup *pcg = childcg->css.cgroup->parent;
	struct dev_cgroup *parent;

	if (!pcg)
		return 1;
	parent = cgroup_to_devcgroup(pcg);
	return may_access(parent, ex);
}

/**
 * may_allow_all - checks if it's possible to change the behavior to
 *		   allow based on parent's rules.
 * @parent: device cgroup's parent
 * returns: != 0 in case it's allowed, 0 otherwise
 */
static inline int may_allow_all(struct dev_cgroup *parent)
{
	if (!parent)
		return 1;
	return parent->behavior == DEVCG_DEFAULT_ALLOW;
}

/*
 * Modify the exception list using allow/deny rules.
 * CAP_SYS_ADMIN is needed for this.  It's at least separate from CAP_MKNOD
 * so we can give a container CAP_MKNOD to let it create devices but not
 * modify the exception list.
 * It seems likely we'll want to add a CAP_CONTAINER capability to allow
 * us to also grant CAP_SYS_ADMIN to containers without giving away the
 * device exception list controls, but for now we'll stick with CAP_SYS_ADMIN
 *
 * Taking rules away is always allowed (given CAP_SYS_ADMIN).  Granting
 * new access is only allowed if you're in the top-level cgroup, or your
 * parent cgroup has the access you're asking for.
 */
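/*
 * The rules parsed below follow the documented devices cgroup syntax,
 * for example (assuming the controller is mounted at /sys/fs/cgroup/devices):
 *
 *	echo "c 1:3 rw"  > /sys/fs/cgroup/devices/<group>/devices.allow
 *	echo "b 8:* rwm" > /sys/fs/cgroup/devices/<group>/devices.deny
 *	echo "a"         > /sys/fs/cgroup/devices/<group>/devices.deny
 *
 * i.e. a type ('a', 'b' or 'c'), major:minor (with '*' as a wildcard) and
 * any combination of 'r', 'w' and 'm'; a leading 'a' switches the cgroup's
 * default behavior instead of adding an exception.
 */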
static int devcgroup_update_access(struct dev_cgroup *devcgroup,
				   int filetype, const char *buffer)
{
	const char *b;
	char temp[12];		/* 11 + 1 characters needed for a u32 */
	int count, rc;
	struct dev_exception_item ex;
	struct cgroup *p = devcgroup->css.cgroup;
	struct dev_cgroup *parent = NULL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (p->parent)
		parent = cgroup_to_devcgroup(p->parent);

	memset(&ex, 0, sizeof(ex));
	b = buffer;

	switch (*b) {
	case 'a':
		switch (filetype) {
		case DEVCG_ALLOW:
			if (!may_allow_all(parent))
				return -EPERM;
			dev_exception_clean(devcgroup);
			devcgroup->behavior = DEVCG_DEFAULT_ALLOW;
			if (!parent)
				break;

			rc = dev_exceptions_copy(&devcgroup->exceptions,
						 &parent->exceptions);
			if (rc)
				return rc;
			break;
		case DEVCG_DENY:
			dev_exception_clean(devcgroup);
			devcgroup->behavior = DEVCG_DEFAULT_DENY;
			break;
		default:
			return -EINVAL;
		}
		return 0;
	case 'b':
		ex.type = DEV_BLOCK;
		break;
	case 'c':
		ex.type = DEV_CHAR;
		break;
	default:
		return -EINVAL;
	}
	b++;
	if (!isspace(*b))
		return -EINVAL;
	b++;
	if (*b == '*') {
		ex.major = ~0;
		b++;
	} else if (isdigit(*b)) {
		memset(temp, 0, sizeof(temp));
		for (count = 0; count < sizeof(temp) - 1; count++) {
			temp[count] = *b;
			b++;
			if (!isdigit(*b))
				break;
		}
		rc = kstrtou32(temp, 10, &ex.major);
		if (rc)
			return -EINVAL;
	} else {
		return -EINVAL;
	}
	if (*b != ':')
		return -EINVAL;
	b++;

	/* read minor */
	if (*b == '*') {
		ex.minor = ~0;
		b++;
	} else if (isdigit(*b)) {
		memset(temp, 0, sizeof(temp));
		for (count = 0; count < sizeof(temp) - 1; count++) {
			temp[count] = *b;
			b++;
			if (!isdigit(*b))
				break;
		}
		rc = kstrtou32(temp, 10, &ex.minor);
		if (rc)
			return -EINVAL;
	} else {
		return -EINVAL;
	}
	if (!isspace(*b))
		return -EINVAL;
	for (b++, count = 0; count < 3; count++, b++) {
		switch (*b) {
		case 'r':
			ex.access |= ACC_READ;
			break;
		case 'w':
			ex.access |= ACC_WRITE;
			break;
		case 'm':
			ex.access |= ACC_MKNOD;
			break;
		case '\n':
		case '\0':
			count = 3;
			break;
		default:
			return -EINVAL;
		}
	}

	switch (filetype) {
	case DEVCG_ALLOW:
		if (!parent_has_perm(devcgroup, &ex))
			return -EPERM;
		/*
		 * If the default policy is to allow, try to remove a
		 * matching exception instead. And be silent about it: we
		 * don't want to break compatibility
		 */
		if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
			dev_exception_rm(devcgroup, &ex);
			return 0;
		}
		return dev_exception_add(devcgroup, &ex);
	case DEVCG_DENY:
		/*
		 * If the default policy is to deny, try to remove a
		 * matching exception instead. And be silent about it: we
		 * don't want to break compatibility
		 */
		if (devcgroup->behavior == DEVCG_DEFAULT_DENY) {
			dev_exception_rm(devcgroup, &ex);
			return 0;
		}
		return dev_exception_add(devcgroup, &ex);
	default:
		return -EINVAL;
	}
	return 0;
}

static int devcgroup_access_write(struct cgroup *cgrp, struct cftype *cft,
				  const char *buffer)
{
	int retval;

	mutex_lock(&devcgroup_mutex);
	retval = devcgroup_update_access(cgroup_to_devcgroup(cgrp),
					 cft->private, buffer);
	mutex_unlock(&devcgroup_mutex);
	return retval;
}

static struct cftype dev_cgroup_files[] = {
	{
		.name = "allow",
		.write_string = devcgroup_access_write,
		.private = DEVCG_ALLOW,
	},
	{
		.name = "deny",
		.write_string = devcgroup_access_write,
		.private = DEVCG_DENY,
	},
	{
		.name = "list",
		.read_seq_string = devcgroup_seq_read,
		.private = DEVCG_LIST,
	},
	{ }	/* terminate */
};

struct cgroup_subsys devices_subsys = {
	.name = "devices",
	.can_attach = devcgroup_can_attach,
	.create = devcgroup_create,
	.destroy = devcgroup_destroy,
	.subsys_id = devices_subsys_id,
	.base_cftypes = dev_cgroup_files,

	/*
	 * While the devices cgroup has rudimentary hierarchy support which
	 * checks the parent's restriction, it doesn't properly propagate
	 * config changes in ancestors to their descendants.  A child
	 * should only be allowed to add more restrictions to the parent's
	 * configuration.  Fix it and remove the following.
	 */
	.broken_hierarchy = true,
};

/**
 * __devcgroup_check_permission - checks if an inode operation is permitted
 * @type: device type
 * @major: device major number
 * @minor: device minor number
 * @access: combination of ACC_WRITE, ACC_READ and ACC_MKNOD
 *
 * The check is done against the current task's dev cgroup.
 * returns 0 on success, -EPERM in case the operation is not permitted
 */
static int __devcgroup_check_permission(short type, u32 major, u32 minor,
					short access)
{
	struct dev_cgroup *dev_cgroup;
	struct dev_exception_item ex;
	int rc;

	memset(&ex, 0, sizeof(ex));
	ex.type = type;
	ex.major = major;
	ex.minor = minor;
	ex.access = access;

	rcu_read_lock();
	dev_cgroup = task_devcgroup(current);
	rc = may_access(dev_cgroup, &ex);
	rcu_read_unlock();

	if (!rc)
		return -EPERM;

	return 0;
}

int __devcgroup_inode_permission(struct inode *inode, int mask)
{
	short type, access = 0;

	if (S_ISBLK(inode->i_mode))
		type = DEV_BLOCK;
	if (S_ISCHR(inode->i_mode))
		type = DEV_CHAR;
	if (mask & MAY_WRITE)
		access |= ACC_WRITE;
	if (mask & MAY_READ)
		access |= ACC_READ;

	return __devcgroup_check_permission(type, imajor(inode), iminor(inode),
			access);
}

int devcgroup_inode_mknod(int mode, dev_t dev)
{
	short type;

	if (!S_ISBLK(mode) && !S_ISCHR(mode))
		return 0;

	if (S_ISBLK(mode))
		type = DEV_BLOCK;
	else
		type = DEV_CHAR;

	return __devcgroup_check_permission(type, MAJOR(dev), MINOR(dev),
			ACC_MKNOD);
}