/*
 * device_cgroup.c - device cgroup subsystem
 *
 * Copyright 2007 IBM Corp
 */

#include <linux/device_cgroup.h>
#include <linux/cgroup.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>

#define ACC_MKNOD 1
#define ACC_READ  2
#define ACC_WRITE 4
#define ACC_MASK (ACC_MKNOD | ACC_READ | ACC_WRITE)

#define DEV_BLOCK 1
#define DEV_CHAR  2
#define DEV_ALL   4  /* this represents all devices */

static DEFINE_MUTEX(devcgroup_mutex);

enum devcg_behavior {
	DEVCG_DEFAULT_NONE,
	DEVCG_DEFAULT_ALLOW,
	DEVCG_DEFAULT_DENY,
};

/*
 * exception list locking rules:
 * hold devcgroup_mutex for update/read.
 * hold rcu_read_lock() for read.
 */

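/*
 * Each dev_cgroup couples a default behavior with a list of exceptions.
 * With DEVCG_DEFAULT_ALLOW the exceptions describe the devices being
 * denied; with DEVCG_DEFAULT_DENY they describe the devices being allowed.
 * DEVCG_DEFAULT_NONE only means the css has not been brought online yet
 * (see devcgroup_online()/devcgroup_offline()).
 */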
struct dev_exception_item {
	u32 major, minor;
	short type;
	short access;
	struct list_head list;
	struct rcu_head rcu;
};

struct dev_cgroup {
	struct cgroup_subsys_state css;
	struct list_head exceptions;
	enum devcg_behavior behavior;
};

static inline struct dev_cgroup *css_to_devcgroup(struct cgroup_subsys_state *s)
{
	return s ? container_of(s, struct dev_cgroup, css) : NULL;
}

static inline struct dev_cgroup *task_devcgroup(struct task_struct *task)
{
	return css_to_devcgroup(task_css(task, devices_cgrp_id));
}

/*
 * called under devcgroup_mutex
 */
static int dev_exceptions_copy(struct list_head *dest, struct list_head *orig)
{
	struct dev_exception_item *ex, *tmp, *new;

	lockdep_assert_held(&devcgroup_mutex);

	list_for_each_entry(ex, orig, list) {
		new = kmemdup(ex, sizeof(*ex), GFP_KERNEL);
		if (!new)
			goto free_and_exit;
		list_add_tail(&new->list, dest);
	}

	return 0;

free_and_exit:
	list_for_each_entry_safe(ex, tmp, dest, list) {
		list_del(&ex->list);
		kfree(ex);
	}
	return -ENOMEM;
}

/*
 * called under devcgroup_mutex
 */
static int dev_exception_add(struct dev_cgroup *dev_cgroup,
			     struct dev_exception_item *ex)
{
	struct dev_exception_item *excopy, *walk;

	lockdep_assert_held(&devcgroup_mutex);

	excopy = kmemdup(ex, sizeof(*ex), GFP_KERNEL);
	if (!excopy)
		return -ENOMEM;

	list_for_each_entry(walk, &dev_cgroup->exceptions, list) {
		if (walk->type != ex->type)
			continue;
		if (walk->major != ex->major)
			continue;
		if (walk->minor != ex->minor)
			continue;

		walk->access |= ex->access;
		kfree(excopy);
		excopy = NULL;
	}

	if (excopy != NULL)
		list_add_tail_rcu(&excopy->list, &dev_cgroup->exceptions);
	return 0;
}

/*
 * called under devcgroup_mutex
 */
static void dev_exception_rm(struct dev_cgroup *dev_cgroup,
			     struct dev_exception_item *ex)
{
	struct dev_exception_item *walk, *tmp;

	lockdep_assert_held(&devcgroup_mutex);

	list_for_each_entry_safe(walk, tmp, &dev_cgroup->exceptions, list) {
		if (walk->type != ex->type)
			continue;
		if (walk->major != ex->major)
			continue;
		if (walk->minor != ex->minor)
			continue;

		walk->access &= ~ex->access;
		if (!walk->access) {
			list_del_rcu(&walk->list);
			kfree_rcu(walk, rcu);
		}
	}
}

static void __dev_exception_clean(struct dev_cgroup *dev_cgroup)
{
	struct dev_exception_item *ex, *tmp;

	list_for_each_entry_safe(ex, tmp, &dev_cgroup->exceptions, list) {
		list_del_rcu(&ex->list);
		kfree_rcu(ex, rcu);
	}
}

/**
 * dev_exception_clean - frees all entries of the exception list
 * @dev_cgroup: dev_cgroup with the exception list to be cleaned
 *
 * called under devcgroup_mutex
 */
static void dev_exception_clean(struct dev_cgroup *dev_cgroup)
{
	lockdep_assert_held(&devcgroup_mutex);

	__dev_exception_clean(dev_cgroup);
}

static inline bool is_devcg_online(const struct dev_cgroup *devcg)
{
	return (devcg->behavior != DEVCG_DEFAULT_NONE);
}

/**
 * devcgroup_online - initializes devcgroup's behavior and exceptions based on
 *		      parent's
 * @css: css getting online
 * returns 0 in case of success, error code otherwise
 */
static int devcgroup_online(struct cgroup_subsys_state *css)
{
	struct dev_cgroup *dev_cgroup = css_to_devcgroup(css);
	struct dev_cgroup *parent_dev_cgroup = css_to_devcgroup(css->parent);
	int ret = 0;

	mutex_lock(&devcgroup_mutex);

	if (parent_dev_cgroup == NULL)
		dev_cgroup->behavior = DEVCG_DEFAULT_ALLOW;
	else {
		ret = dev_exceptions_copy(&dev_cgroup->exceptions,
					  &parent_dev_cgroup->exceptions);
		if (!ret)
			dev_cgroup->behavior = parent_dev_cgroup->behavior;
	}
	mutex_unlock(&devcgroup_mutex);

	return ret;
}

static void devcgroup_offline(struct cgroup_subsys_state *css)
{
	struct dev_cgroup *dev_cgroup = css_to_devcgroup(css);

	mutex_lock(&devcgroup_mutex);
	dev_cgroup->behavior = DEVCG_DEFAULT_NONE;
	mutex_unlock(&devcgroup_mutex);
}

/*
 * called from kernel/cgroup.c with cgroup_lock() held.
 */
static struct cgroup_subsys_state *
devcgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct dev_cgroup *dev_cgroup;

	dev_cgroup = kzalloc(sizeof(*dev_cgroup), GFP_KERNEL);
	if (!dev_cgroup)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&dev_cgroup->exceptions);
	dev_cgroup->behavior = DEVCG_DEFAULT_NONE;

	return &dev_cgroup->css;
}

static void devcgroup_css_free(struct cgroup_subsys_state *css)
{
	struct dev_cgroup *dev_cgroup = css_to_devcgroup(css);

	__dev_exception_clean(dev_cgroup);
	kfree(dev_cgroup);
}

#define DEVCG_ALLOW 1
#define DEVCG_DENY 2
#define DEVCG_LIST 3

#define MAJMINLEN 13
#define ACCLEN 4

static void set_access(char *acc, short access)
{
	int idx = 0;
	memset(acc, 0, ACCLEN);
	if (access & ACC_READ)
		acc[idx++] = 'r';
	if (access & ACC_WRITE)
		acc[idx++] = 'w';
	if (access & ACC_MKNOD)
		acc[idx++] = 'm';
}

static char type_to_char(short type)
{
	if (type == DEV_ALL)
		return 'a';
	if (type == DEV_CHAR)
		return 'c';
	if (type == DEV_BLOCK)
		return 'b';
	return 'X';
}

static void set_majmin(char *str, unsigned m)
{
	if (m == ~0)
		strcpy(str, "*");
	else
		sprintf(str, "%u", m);
}

static int devcgroup_seq_show(struct seq_file *m, void *v)
{
	struct dev_cgroup *devcgroup = css_to_devcgroup(seq_css(m));
	struct dev_exception_item *ex;
	char maj[MAJMINLEN], min[MAJMINLEN], acc[ACCLEN];

	rcu_read_lock();
	/*
	 * To preserve the compatibility:
	 * - Only show the "all devices" when the default policy is to allow
	 * - List the exceptions in case the default policy is to deny
	 * This way, the file remains as a "whitelist of devices"
	 */
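	/*
	 * Hypothetical output, for illustration only: with default deny and
	 * exceptions for char 1:3 (rwm) and read access to every minor of
	 * block major 8, the file would read:
	 *
	 *   c 1:3 rwm
	 *   b 8:* r
	 */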
	if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
		set_access(acc, ACC_MASK);
		set_majmin(maj, ~0);
		set_majmin(min, ~0);
		seq_printf(m, "%c %s:%s %s\n", type_to_char(DEV_ALL),
			   maj, min, acc);
	} else {
		list_for_each_entry_rcu(ex, &devcgroup->exceptions, list) {
			set_access(acc, ex->access);
			set_majmin(maj, ex->major);
			set_majmin(min, ex->minor);
			seq_printf(m, "%c %s:%s %s\n", type_to_char(ex->type),
				   maj, min, acc);
		}
	}
	rcu_read_unlock();

	return 0;
}

/**
 * match_exception	- iterates the exception list trying to find a complete match
 * @exceptions: list of exceptions
 * @type: device type (DEV_BLOCK or DEV_CHAR)
 * @major: device file major number, ~0 to match all
 * @minor: device file minor number, ~0 to match all
 * @access: permission mask (ACC_READ, ACC_WRITE, ACC_MKNOD)
 *
 * It is considered a complete match if an exception is found that will
 * contain the entire range of provided parameters.
 *
 * Return: true in case it matches an exception completely
 */
static bool match_exception(struct list_head *exceptions, short type,
			    u32 major, u32 minor, short access)
{
	struct dev_exception_item *ex;

	list_for_each_entry_rcu(ex, exceptions, list) {
		if ((type & DEV_BLOCK) && !(ex->type & DEV_BLOCK))
			continue;
		if ((type & DEV_CHAR) && !(ex->type & DEV_CHAR))
			continue;
		if (ex->major != ~0 && ex->major != major)
			continue;
		if (ex->minor != ~0 && ex->minor != minor)
			continue;
		/* provided access cannot have more than the exception rule */
		if (access & (~ex->access))
			continue;
		return true;
	}
	return false;
}
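
/*
 * For example, an exception "b *:* rw" completely matches a request for
 * (DEV_BLOCK, 8, 0, ACC_READ), but not one asking for ACC_MKNOD as well:
 * the requested access must be fully covered by the exception's access bits.
 */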

/**
 * match_exception_partial - iterates the exception list trying to find a partial match
 * @exceptions: list of exceptions
 * @type: device type (DEV_BLOCK or DEV_CHAR)
 * @major: device file major number, ~0 to match all
 * @minor: device file minor number, ~0 to match all
 * @access: permission mask (ACC_READ, ACC_WRITE, ACC_MKNOD)
 *
 * It is considered a partial match if an exception's range is found to
 * contain *any* of the devices specified by the provided parameters. This is
 * used to make sure no extra access is being granted that is forbidden by
 * any of the exceptions.
 *
 * Return: true in case the provided range partially matches an exception
 */
static bool match_exception_partial(struct list_head *exceptions, short type,
				    u32 major, u32 minor, short access)
{
	struct dev_exception_item *ex;

	list_for_each_entry_rcu(ex, exceptions, list) {
		if ((type & DEV_BLOCK) && !(ex->type & DEV_BLOCK))
			continue;
		if ((type & DEV_CHAR) && !(ex->type & DEV_CHAR))
			continue;
		/*
		 * We must be sure that both the exception and the provided
		 * range aren't masking all devices
		 */
		if (ex->major != ~0 && major != ~0 && ex->major != major)
			continue;
		if (ex->minor != ~0 && minor != ~0 && ex->minor != minor)
			continue;
		/*
		 * In order to make sure the provided range isn't matching
		 * an exception, all its access bits shouldn't match the
		 * exception's access bits
		 */
		if (!(access & ex->access))
			continue;
		return true;
	}
	return false;
}

/**
 * verify_new_ex - verifies if a new exception is allowed by parent cgroup's permissions
 * @dev_cgroup: dev cgroup to be tested against
 * @refex: new exception
 * @behavior: behavior of the exception's dev_cgroup
 *
 * This is used to make sure a child cgroup won't have more privileges
 * than its parent
 */
static bool verify_new_ex(struct dev_cgroup *dev_cgroup,
		          struct dev_exception_item *refex,
		          enum devcg_behavior behavior)
{
	bool match = false;

	rcu_lockdep_assert(rcu_read_lock_held() ||
			   lockdep_is_held(&devcgroup_mutex),
			   "device_cgroup:verify_new_ex called without proper synchronization");

	if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW) {
		if (behavior == DEVCG_DEFAULT_ALLOW) {
			/*
			 * new exception in the child doesn't matter, only
			 * adding extra restrictions
			 */
			return true;
		} else {
			/*
			 * new exception in the child will add more devices
			 * that can be accessed, so it can't match any of
			 * parent's exceptions, even slightly
			 */
			match = match_exception_partial(&dev_cgroup->exceptions,
							refex->type,
							refex->major,
							refex->minor,
							refex->access);

			if (match)
				return false;
			return true;
		}
	} else {
		/*
		 * Only behavior == DEVCG_DEFAULT_DENY allowed here, therefore
		 * the new exception will add access to more devices and must
		 * be contained completely in a parent's exception to be
		 * allowed
		 */
		match = match_exception(&dev_cgroup->exceptions, refex->type,
					refex->major, refex->minor,
					refex->access);

		if (match)
			/* parent has an exception that matches the proposed */
			return true;
		else
			return false;
	}
	return false;
}

/*
 * parent_has_perm:
 * when adding a new allow rule to a device exception list, the rule
 * must be allowed in the parent device
 */
static int parent_has_perm(struct dev_cgroup *childcg,
				  struct dev_exception_item *ex)
{
	struct dev_cgroup *parent = css_to_devcgroup(childcg->css.parent);

	if (!parent)
		return 1;
	return verify_new_ex(parent, ex, childcg->behavior);
}

/**
 * parent_allows_removal - verify if it's ok to remove an exception
 * @childcg: child cgroup from where the exception will be removed
 * @ex: exception being removed
 *
 * When removing an exception in cgroups with default ALLOW policy, it must
 * be checked if removing it will give the child cgroup more access than the
 * parent.
 *
 * Return: true if it's ok to remove exception, false otherwise
 */
static bool parent_allows_removal(struct dev_cgroup *childcg,
				  struct dev_exception_item *ex)
{
	struct dev_cgroup *parent = css_to_devcgroup(childcg->css.parent);

	if (!parent)
		return true;

	/* It's always allowed to remove access to devices */
	if (childcg->behavior == DEVCG_DEFAULT_DENY)
		return true;

	/*
	 * Make sure you're not removing part of or a whole exception existing in
	 * the parent cgroup
	 */
	return !match_exception_partial(&parent->exceptions, ex->type,
					ex->major, ex->minor, ex->access);
}

/**
 * may_allow_all - checks if it's possible to change the behavior to
 *		   allow based on parent's rules.
 * @parent: device cgroup's parent
 * returns: != 0 in case it's allowed, 0 otherwise
 */
static inline int may_allow_all(struct dev_cgroup *parent)
{
	if (!parent)
		return 1;
	return parent->behavior == DEVCG_DEFAULT_ALLOW;
}

/**
 * revalidate_active_exceptions - walks through the active exception list and
 * 				  revalidates the exceptions based on parent's
 * 				  behavior and exceptions. The exceptions that
 * 				  are no longer valid will be removed.
 * 				  Called with devcgroup_mutex held.
 * @devcg: cgroup whose exceptions will be checked
 *
 * This is one of the three key functions for hierarchy implementation.
 * This function is responsible for re-evaluating all the cgroup's active
 * exceptions due to a parent's exception change.
 * Refer to Documentation/cgroups/devices.txt for more details.
 */
static void revalidate_active_exceptions(struct dev_cgroup *devcg)
{
	struct dev_exception_item *ex;
	struct list_head *this, *tmp;

	list_for_each_safe(this, tmp, &devcg->exceptions) {
		ex = container_of(this, struct dev_exception_item, list);
		if (!parent_has_perm(devcg, ex))
			dev_exception_rm(devcg, ex);
	}
}
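
/*
 * Propagation example (device numbers are illustrative only): when a parent
 * with default allow gains the restriction "c 1:3 rwm" via devices.deny,
 * each online descendant that also defaults to allow gets the same exception
 * added, while descendants that default to deny have any "c 1:3" exception
 * stripped of those access bits; every visited descendant's remaining
 * exceptions are then revalidated against its parent.
 */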

/**
 * propagate_exception - propagates a new exception to the children
 * @devcg_root: device cgroup that added a new exception
 * @ex: new exception to be propagated
 *
 * returns: 0 in case of success, != 0 in case of error
 */
static int propagate_exception(struct dev_cgroup *devcg_root,
			       struct dev_exception_item *ex)
{
	struct cgroup_subsys_state *pos;
	int rc = 0;

	rcu_read_lock();

	css_for_each_descendant_pre(pos, &devcg_root->css) {
		struct dev_cgroup *devcg = css_to_devcgroup(pos);

		/*
		 * Because devcgroup_mutex is held, no devcg will become
		 * online or offline during the tree walk (see on/offline
		 * methods), and online ones are safe to access outside RCU
		 * read lock without bumping refcnt.
		 */
		if (pos == &devcg_root->css || !is_devcg_online(devcg))
			continue;

		rcu_read_unlock();

		/*
		 * in case both the root's and devcg's behavior is allow, a new
		 * restriction means adding to the exception list
		 */
		if (devcg_root->behavior == DEVCG_DEFAULT_ALLOW &&
		    devcg->behavior == DEVCG_DEFAULT_ALLOW) {
			rc = dev_exception_add(devcg, ex);
			if (rc)
				break;
		} else {
			/*
			 * in the other possible cases:
			 * root's behavior: allow, devcg's: deny
			 * root's behavior: deny, devcg's: deny
			 * the exception will be removed
			 */
			dev_exception_rm(devcg, ex);
		}
		revalidate_active_exceptions(devcg);

		rcu_read_lock();
	}

	rcu_read_unlock();
	return rc;
}

static inline bool has_children(struct dev_cgroup *devcgroup)
{
	struct cgroup *cgrp = devcgroup->css.cgroup;

	return !list_empty(&cgrp->children);
}

/*
 * Modify the exception list using allow/deny rules.
 * CAP_SYS_ADMIN is needed for this.  It's at least separate from CAP_MKNOD
 * so we can give a container CAP_MKNOD to let it create devices but not
 * modify the exception list.
 * It seems likely we'll want to add a CAP_CONTAINER capability to allow
 * us to also grant CAP_SYS_ADMIN to containers without giving away the
 * device exception list controls, but for now we'll stick with CAP_SYS_ADMIN
 *
 * Taking rules away is always allowed (given CAP_SYS_ADMIN).  Granting
 * new access is only allowed if you're in the top-level cgroup, or your
 * parent cgroup has the access you're asking for.
 */
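/*
 * Illustrative usage from userspace (assuming the cgroup v1 devices
 * controller is mounted at /sys/fs/cgroup/devices; the device numbers are
 * examples only):
 *
 *   echo a > devices.deny		switch to "deny all by default"
 *   echo 'c 1:3 rwm' > devices.allow	allow char device 1:3
 *   echo 'b 8:* r' > devices.deny	remove/forbid read access on major 8
 */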
static int devcgroup_update_access(struct dev_cgroup *devcgroup,
				   int filetype, char *buffer)
{
	const char *b;
	char temp[12];		/* 11 + 1 characters needed for a u32 */
	int count, rc = 0;
	struct dev_exception_item ex;
	struct dev_cgroup *parent = css_to_devcgroup(devcgroup->css.parent);

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	memset(&ex, 0, sizeof(ex));
	b = buffer;

	switch (*b) {
	case 'a':
		switch (filetype) {
		case DEVCG_ALLOW:
			if (has_children(devcgroup))
				return -EINVAL;

			if (!may_allow_all(parent))
				return -EPERM;
			dev_exception_clean(devcgroup);
			devcgroup->behavior = DEVCG_DEFAULT_ALLOW;
			if (!parent)
				break;

			rc = dev_exceptions_copy(&devcgroup->exceptions,
						 &parent->exceptions);
			if (rc)
				return rc;
			break;
		case DEVCG_DENY:
			if (has_children(devcgroup))
				return -EINVAL;

			dev_exception_clean(devcgroup);
			devcgroup->behavior = DEVCG_DEFAULT_DENY;
			break;
		default:
			return -EINVAL;
		}
		return 0;
	case 'b':
		ex.type = DEV_BLOCK;
		break;
	case 'c':
		ex.type = DEV_CHAR;
		break;
	default:
		return -EINVAL;
	}
	b++;
	if (!isspace(*b))
		return -EINVAL;
	b++;
	if (*b == '*') {
		ex.major = ~0;
		b++;
	} else if (isdigit(*b)) {
		memset(temp, 0, sizeof(temp));
		for (count = 0; count < sizeof(temp) - 1; count++) {
			temp[count] = *b;
			b++;
			if (!isdigit(*b))
				break;
		}
		rc = kstrtou32(temp, 10, &ex.major);
		if (rc)
			return -EINVAL;
	} else {
		return -EINVAL;
	}
	if (*b != ':')
		return -EINVAL;
	b++;

	/* read minor */
	if (*b == '*') {
		ex.minor = ~0;
		b++;
	} else if (isdigit(*b)) {
		memset(temp, 0, sizeof(temp));
		for (count = 0; count < sizeof(temp) - 1; count++) {
			temp[count] = *b;
			b++;
			if (!isdigit(*b))
				break;
		}
		rc = kstrtou32(temp, 10, &ex.minor);
		if (rc)
			return -EINVAL;
	} else {
		return -EINVAL;
	}
	if (!isspace(*b))
		return -EINVAL;
	for (b++, count = 0; count < 3; count++, b++) {
		switch (*b) {
		case 'r':
			ex.access |= ACC_READ;
			break;
		case 'w':
			ex.access |= ACC_WRITE;
			break;
		case 'm':
			ex.access |= ACC_MKNOD;
			break;
		case '\n':
		case '\0':
			count = 3;
			break;
		default:
			return -EINVAL;
		}
	}

	switch (filetype) {
	case DEVCG_ALLOW:
		/*
		 * If the default policy is to allow by default, try to remove
		 * a matching exception instead. And be silent about it: we
		 * don't want to break compatibility
		 */
		if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
			/* Check if the parent allows removing it first */
			if (!parent_allows_removal(devcgroup, &ex))
				return -EPERM;
			dev_exception_rm(devcgroup, &ex);
			break;
		}

		if (!parent_has_perm(devcgroup, &ex))
			return -EPERM;
		rc = dev_exception_add(devcgroup, &ex);
		break;
	case DEVCG_DENY:
		/*
		 * If the default policy is to deny by default, try to remove
		 * a matching exception instead. And be silent about it: we
		 * don't want to break compatibility
		 */
		if (devcgroup->behavior == DEVCG_DEFAULT_DENY)
			dev_exception_rm(devcgroup, &ex);
		else
			rc = dev_exception_add(devcgroup, &ex);

		if (rc)
			break;
		/* we only propagate new restrictions */
		rc = propagate_exception(devcgroup, &ex);
		break;
	default:
		rc = -EINVAL;
	}
	return rc;
}

static ssize_t devcgroup_access_write(struct kernfs_open_file *of,
				      char *buf, size_t nbytes, loff_t off)
{
	int retval;

	mutex_lock(&devcgroup_mutex);
	retval = devcgroup_update_access(css_to_devcgroup(of_css(of)),
					 of_cft(of)->private, strstrip(buf));
	mutex_unlock(&devcgroup_mutex);
	return retval ?: nbytes;
}

static struct cftype dev_cgroup_files[] = {
	{
		.name = "allow",
		.write = devcgroup_access_write,
		.private = DEVCG_ALLOW,
	},
	{
		.name = "deny",
		.write = devcgroup_access_write,
		.private = DEVCG_DENY,
	},
	{
		.name = "list",
		.seq_show = devcgroup_seq_show,
		.private = DEVCG_LIST,
	},
	{ }	/* terminate */
};

struct cgroup_subsys devices_cgrp_subsys = {
	.css_alloc = devcgroup_css_alloc,
	.css_free = devcgroup_css_free,
	.css_online = devcgroup_online,
	.css_offline = devcgroup_offline,
	.base_cftypes = dev_cgroup_files,
};

/**
 * __devcgroup_check_permission - checks if an inode operation is permitted
 * @type: device type
 * @major: device major number
 * @minor: device minor number
 * @access: combination of ACC_WRITE, ACC_READ and ACC_MKNOD
 *
 * The dev cgroup of the current task is tested against.
 *
 * returns 0 on success, -EPERM in case the operation is not permitted
 */
static int __devcgroup_check_permission(short type, u32 major, u32 minor,
				        short access)
{
	struct dev_cgroup *dev_cgroup;
	bool rc;

	rcu_read_lock();
	dev_cgroup = task_devcgroup(current);
	if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW)
		/* Can't match any of the exceptions, even partially */
		rc = !match_exception_partial(&dev_cgroup->exceptions,
					      type, major, minor, access);
	else
		/* Need to match completely one exception to be allowed */
		rc = match_exception(&dev_cgroup->exceptions, type, major,
				     minor, access);
	rcu_read_unlock();

	if (!rc)
		return -EPERM;

	return 0;
}

int __devcgroup_inode_permission(struct inode *inode, int mask)
{
	short type, access = 0;

	if (S_ISBLK(inode->i_mode))
		type = DEV_BLOCK;
	if (S_ISCHR(inode->i_mode))
		type = DEV_CHAR;
	if (mask & MAY_WRITE)
		access |= ACC_WRITE;
	if (mask & MAY_READ)
		access |= ACC_READ;

	return __devcgroup_check_permission(type, imajor(inode), iminor(inode),
			access);
}

int devcgroup_inode_mknod(int mode, dev_t dev)
{
	short type;

	if (!S_ISBLK(mode) && !S_ISCHR(mode))
		return 0;

	if (S_ISBLK(mode))
		type = DEV_BLOCK;
	else
		type = DEV_CHAR;

	return __devcgroup_check_permission(type, MAJOR(dev), MINOR(dev),
			ACC_MKNOD);
}