Commit 2ab3f29d authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'akpm' (Andrew's fixes)

Merge misc fixes from Andrew Morton:
 "18 total.  15 fixes and some updates to a device_cgroup patchset which
  bring it up to date with the version which I should have merged in the
  first place."

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (18 patches)
  fs/compat_ioctl.c: VIDEO_SET_SPU_PALETTE missing error check
  gen_init_cpio: avoid stack overflow when expanding
  drivers/rtc/rtc-imxdi.c: add missing spin lock initialization
  mm, numa: avoid setting zone_reclaim_mode unless a node is sufficiently distant
  pidns: limit the nesting depth of pid namespaces
  drivers/dma/dw_dmac: make driver's endianness configurable
  mm/mmu_notifier: allocate mmu_notifier in advance
  tools/testing/selftests/epoll/test_epoll.c: fix build
  UAPI: fix tools/vm/page-types.c
  mm/page_alloc.c:alloc_contig_range(): return early for err path
  rbtree: include linux/compiler.h for definition of __always_inline
  genalloc: stop crashing the system when destroying a pool
  backlight: ili9320: add missing SPI dependency
  device_cgroup: add proper checking when changing default behavior
  device_cgroup: stop using simple_strtoul()
  device_cgroup: rename deny_all to behavior
  cgroup: fix invalid rcu dereference
  mm: fix XFS oops due to dirty pages without buffers on s390
parents b1e4279e 12176503
@@ -90,6 +90,17 @@ config DW_DMAC
 	  Support the Synopsys DesignWare AHB DMA controller.  This
 	  can be integrated in chips such as the Atmel AT32ap7000.
 
+config DW_DMAC_BIG_ENDIAN_IO
+	bool "Use big endian I/O register access"
+	default y if AVR32
+	depends on DW_DMAC
+	help
+	  Say yes here to use big endian I/O access when reading and writing
+	  to the DMA controller registers. This is needed on some platforms,
+	  like the Atmel AVR32 architecture.
+
+	  If unsure, use the default setting.
+
 config AT_HDMAC
 	tristate "Atmel AHB DMA support"
 	depends on ARCH_AT91
......
@@ -98,9 +98,17 @@ struct dw_dma_regs {
 	u32	DW_PARAMS;
 };
 
+#ifdef CONFIG_DW_DMAC_BIG_ENDIAN_IO
+#define dma_readl_native ioread32be
+#define dma_writel_native iowrite32be
+#else
+#define dma_readl_native readl
+#define dma_writel_native writel
+#endif
+
 /* To access the registers in early stage of probe */
 #define dma_read_byaddr(addr, name) \
-	readl((addr) + offsetof(struct dw_dma_regs, name))
+	dma_readl_native((addr) + offsetof(struct dw_dma_regs, name))
 
 /* Bitfields in DW_PARAMS */
 #define DW_PARAMS_NR_CHAN	8		/* number of channels */
@@ -216,9 +224,9 @@ __dwc_regs(struct dw_dma_chan *dwc)
 }
 
 #define channel_readl(dwc, name) \
-	readl(&(__dwc_regs(dwc)->name))
+	dma_readl_native(&(__dwc_regs(dwc)->name))
 #define channel_writel(dwc, name, val) \
-	writel((val), &(__dwc_regs(dwc)->name))
+	dma_writel_native((val), &(__dwc_regs(dwc)->name))
 
 static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
 {
@@ -246,9 +254,9 @@ static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw)
 }
 
 #define dma_readl(dw, name) \
-	readl(&(__dw_regs(dw)->name))
+	dma_readl_native(&(__dw_regs(dw)->name))
 #define dma_writel(dw, name, val) \
-	writel((val), &(__dw_regs(dw)->name))
+	dma_writel_native((val), &(__dw_regs(dw)->name))
 
 #define channel_set_bit(dw, reg, mask) \
 	dma_writel(dw, reg, ((mask) << 8) | (mask))
......
@@ -392,6 +392,8 @@ static int dryice_rtc_probe(struct platform_device *pdev)
 	if (imxdi->ioaddr == NULL)
 		return -ENOMEM;
 
+	spin_lock_init(&imxdi->irq_lock);
+
 	imxdi->irq = platform_get_irq(pdev, 0);
 	if (imxdi->irq < 0)
 		return imxdi->irq;
......
@@ -60,7 +60,8 @@ config LCD_LTV350QV
 	  The LTV350QV panel is present on all ATSTK1000 boards.
 
 config LCD_ILI9320
-	tristate
+	tristate "ILI Technology ILI9320 controller support"
+	depends on SPI
 	help
 	  If you have a panel based on the ILI9320 controller chip
 	  then say y to include a power driver for it.
......
@@ -210,6 +210,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd,
 	err  = get_user(palp, &up->palette);
 	err |= get_user(length, &up->length);
+	if (err)
+		return -EFAULT;
 
 	up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
 	err  = put_user(compat_ptr(palp), &up_native->palette);
......
@@ -24,6 +24,7 @@
 #ifndef _LINUX_RBTREE_AUGMENTED_H
 #define _LINUX_RBTREE_AUGMENTED_H
 
+#include <linux/compiler.h>
 #include <linux/rbtree.h>
 
 /*
......
@@ -71,12 +71,22 @@ err_alloc:
 	return NULL;
 }
 
+/* MAX_PID_NS_LEVEL is needed for limiting size of 'struct pid' */
+#define MAX_PID_NS_LEVEL 32
+
 static struct pid_namespace *create_pid_namespace(struct pid_namespace *parent_pid_ns)
 {
 	struct pid_namespace *ns;
 	unsigned int level = parent_pid_ns->level + 1;
-	int i, err = -ENOMEM;
+	int i;
+	int err;
 
+	if (level > MAX_PID_NS_LEVEL) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	err = -ENOMEM;
 	ns = kmem_cache_zalloc(pid_ns_cachep, GFP_KERNEL);
 	if (ns == NULL)
 		goto out;
......
@@ -178,7 +178,7 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy
 	struct gen_pool_chunk *chunk;
 	int nbits = size >> pool->min_alloc_order;
 	int nbytes = sizeof(struct gen_pool_chunk) +
-				(nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE;
+				BITS_TO_LONGS(nbits) * sizeof(long);
 
 	chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid);
 	if (unlikely(chunk == NULL))
......
@@ -196,28 +196,28 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
 	BUG_ON(atomic_read(&mm->mm_users) <= 0);
 
 	/*
	 * Verify that mmu_notifier_init() already run and the global srcu is
	 * initialized.
	 */
 	BUG_ON(!srcu.per_cpu_ref);
 
+	ret = -ENOMEM;
+	mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm), GFP_KERNEL);
+	if (unlikely(!mmu_notifier_mm))
+		goto out;
+
 	if (take_mmap_sem)
 		down_write(&mm->mmap_sem);
 	ret = mm_take_all_locks(mm);
 	if (unlikely(ret))
-		goto out;
+		goto out_clean;
 
 	if (!mm_has_notifiers(mm)) {
-		mmu_notifier_mm = kmalloc(sizeof(struct mmu_notifier_mm),
-					  GFP_KERNEL);
-		if (unlikely(!mmu_notifier_mm)) {
-			ret = -ENOMEM;
-			goto out_of_mem;
-		}
 		INIT_HLIST_HEAD(&mmu_notifier_mm->list);
 		spin_lock_init(&mmu_notifier_mm->lock);
 		mm->mmu_notifier_mm = mmu_notifier_mm;
+		mmu_notifier_mm = NULL;
 	}
 	atomic_inc(&mm->mm_count);
@@ -233,12 +233,12 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
 	hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
 	spin_unlock(&mm->mmu_notifier_mm->lock);
 
-out_of_mem:
 	mm_drop_all_locks(mm);
-out:
+out_clean:
 	if (take_mmap_sem)
 		up_write(&mm->mmap_sem);
+	kfree(mmu_notifier_mm);
+out:
 	BUG_ON(atomic_read(&mm->mm_users) <= 0);
 	return ret;
 }
......
@@ -1809,10 +1809,10 @@ static void __paginginit init_zone_allows_reclaim(int nid)
 	int i;
 
 	for_each_online_node(i)
-		if (node_distance(nid, i) <= RECLAIM_DISTANCE) {
+		if (node_distance(nid, i) <= RECLAIM_DISTANCE)
 			node_set(i, NODE_DATA(nid)->reclaim_nodes);
+		else
 			zone_reclaim_mode = 1;
-		}
 }
 
 #else /* CONFIG_NUMA */
......
@@ -56,6 +56,7 @@
 #include <linux/mmu_notifier.h>
 #include <linux/migrate.h>
 #include <linux/hugetlb.h>
+#include <linux/backing-dev.h>
 
 #include <asm/tlbflush.h>
@@ -926,11 +927,8 @@ int page_mkclean(struct page *page)
 	if (page_mapped(page)) {
 		struct address_space *mapping = page_mapping(page);
-		if (mapping) {
+		if (mapping)
 			ret = page_mkclean_file(mapping, page);
-			if (page_test_and_clear_dirty(page_to_pfn(page), 1))
-				ret = 1;
-		}
 	}
 
 	return ret;
@@ -1116,6 +1114,7 @@ void page_add_file_rmap(struct page *page)
  */
 void page_remove_rmap(struct page *page)
 {
+	struct address_space *mapping = page_mapping(page);
 	bool anon = PageAnon(page);
 	bool locked;
 	unsigned long flags;
@@ -1138,8 +1137,19 @@ void page_remove_rmap(struct page *page)
	 * this if the page is anon, so about to be freed; but perhaps
	 * not if it's in swapcache - there might be another pte slot
	 * containing the swap entry, but page not yet written to swap.
+	 *
+	 * And we can skip it on file pages, so long as the filesystem
+	 * participates in dirty tracking; but need to catch shm and tmpfs
+	 * and ramfs pages which have been modified since creation by read
+	 * fault.
+	 *
+	 * Note that mapping must be decided above, before decrementing
+	 * mapcount (which luckily provides a barrier): once page is unmapped,
+	 * it could be truncated and page->mapping reset to NULL at any moment.
+	 * Note also that we are relying on page_mapping(page) to set mapping
+	 * to &swapper_space when PageSwapCache(page).
	 */
-	if ((!anon || PageSwapCache(page)) &&
+	if (mapping && !mapping_cap_account_dirty(mapping) &&
 	    page_test_and_clear_dirty(page_to_pfn(page), 1))
 		set_page_dirty(page);
 
 	/*
......
@@ -42,7 +42,10 @@ struct dev_exception_item {
 struct dev_cgroup {
 	struct cgroup_subsys_state css;
 	struct list_head exceptions;
-	bool deny_all;
+	enum {
+		DEVCG_DEFAULT_ALLOW,
+		DEVCG_DEFAULT_DENY,
+	} behavior;
 };
 
 static inline struct dev_cgroup *css_to_devcgroup(struct cgroup_subsys_state *s)
@@ -182,13 +185,13 @@ static struct cgroup_subsys_state *devcgroup_create(struct cgroup *cgroup)
 	parent_cgroup = cgroup->parent;
 	if (parent_cgroup == NULL)
-		dev_cgroup->deny_all = false;
+		dev_cgroup->behavior = DEVCG_DEFAULT_ALLOW;
 	else {
 		parent_dev_cgroup = cgroup_to_devcgroup(parent_cgroup);
 		mutex_lock(&devcgroup_mutex);
 		ret = dev_exceptions_copy(&dev_cgroup->exceptions,
 					  &parent_dev_cgroup->exceptions);
-		dev_cgroup->deny_all = parent_dev_cgroup->deny_all;
+		dev_cgroup->behavior = parent_dev_cgroup->behavior;
 		mutex_unlock(&devcgroup_mutex);
 		if (ret) {
 			kfree(dev_cgroup);
@@ -260,7 +263,7 @@ static int devcgroup_seq_read(struct cgroup *cgroup, struct cftype *cft,
	 * - List the exceptions in case the default policy is to deny
	 *   This way, the file remains as a "whitelist of devices"
	 */
-	if (devcgroup->deny_all == false) {
+	if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
 		set_access(acc, ACC_MASK);
 		set_majmin(maj, ~0);
 		set_majmin(min, ~0);
@@ -314,12 +317,12 @@ static int may_access(struct dev_cgroup *dev_cgroup,
	 * In two cases we'll consider this new exception valid:
	 * - the dev cgroup has its default policy to allow + exception list:
	 *   the new exception should *not* match any of the exceptions
-	 *   (!deny_all, !match)
+	 *   (behavior == DEVCG_DEFAULT_ALLOW, !match)
	 * - the dev cgroup has its default policy to deny + exception list:
	 *   the new exception *should* match the exceptions
-	 *   (deny_all, match)
+	 *   (behavior == DEVCG_DEFAULT_DENY, match)
	 */
-	if (dev_cgroup->deny_all == match)
+	if ((dev_cgroup->behavior == DEVCG_DEFAULT_DENY) == match)
 		return 1;
 	return 0;
 }
@@ -341,6 +344,17 @@ static int parent_has_perm(struct dev_cgroup *childcg,
 	return may_access(parent, ex);
 }
 
+/**
+ * may_allow_all - checks if it's possible to change the behavior to
+ *		   allow based on parent's rules.
+ * @parent: device cgroup's parent
+ * returns: != 0 in case it's allowed, 0 otherwise
+ */
+static inline int may_allow_all(struct dev_cgroup *parent)
+{
+	return parent->behavior == DEVCG_DEFAULT_ALLOW;
+}
+
 /*
  * Modify the exception list using allow/deny rules.
  * CAP_SYS_ADMIN is needed for this.  It's at least separate from CAP_MKNOD
@@ -358,9 +372,11 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
 			      int filetype, const char *buffer)
 {
 	const char *b;
-	char *endp;
-	int count;
+	char temp[12];		/* 11 + 1 characters needed for a u32 */
+	int count, rc;
 	struct dev_exception_item ex;
+	struct cgroup *p = devcgroup->css.cgroup;
+	struct dev_cgroup *parent = cgroup_to_devcgroup(p->parent);
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
@@ -372,14 +388,18 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
 	case 'a':
 		switch (filetype) {
 		case DEVCG_ALLOW:
-			if (!parent_has_perm(devcgroup, &ex))
+			if (!may_allow_all(parent))
 				return -EPERM;
 			dev_exception_clean(devcgroup);
-			devcgroup->deny_all = false;
+			rc = dev_exceptions_copy(&devcgroup->exceptions,
+						 &parent->exceptions);
+			if (rc)
+				return rc;
+			devcgroup->behavior = DEVCG_DEFAULT_ALLOW;
 			break;
 		case DEVCG_DENY:
 			dev_exception_clean(devcgroup);
-			devcgroup->deny_all = true;
+			devcgroup->behavior = DEVCG_DEFAULT_DENY;
 			break;
 		default:
 			return -EINVAL;
@@ -402,8 +422,16 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
 			ex.major = ~0;
 			b++;
 		} else if (isdigit(*b)) {
-			ex.major = simple_strtoul(b, &endp, 10);
-			b = endp;
+			memset(temp, 0, sizeof(temp));
+			for (count = 0; count < sizeof(temp) - 1; count++) {
+				temp[count] = *b;
+				b++;
+				if (!isdigit(*b))
+					break;
+			}
+			rc = kstrtou32(temp, 10, &ex.major);
+			if (rc)
+				return -EINVAL;
 		} else {
 			return -EINVAL;
 		}
@@ -416,8 +444,16 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
 			ex.minor = ~0;
 			b++;
 		} else if (isdigit(*b)) {
-			ex.minor = simple_strtoul(b, &endp, 10);
-			b = endp;
+			memset(temp, 0, sizeof(temp));
+			for (count = 0; count < sizeof(temp) - 1; count++) {
+				temp[count] = *b;
+				b++;
+				if (!isdigit(*b))
+					break;
+			}
+			rc = kstrtou32(temp, 10, &ex.minor);
+			if (rc)
+				return -EINVAL;
 		} else {
 			return -EINVAL;
 		}
@@ -452,7 +488,7 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
		 * an matching exception instead. And be silent about it: we
		 * don't want to break compatibility
		 */
-		if (devcgroup->deny_all == false) {
+		if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
 			dev_exception_rm(devcgroup, &ex);
 			return 0;
 		}
@@ -463,7 +499,7 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
		 * an matching exception instead. And be silent about it: we
		 * don't want to break compatibility
		 */
-		if (devcgroup->deny_all == true) {
+		if (devcgroup->behavior == DEVCG_DEFAULT_DENY) {
 			dev_exception_rm(devcgroup, &ex);
 			return 0;
 		}
@@ -533,10 +569,10 @@ struct cgroup_subsys devices_subsys = {
  *
  * returns 0 on success, -EPERM case the operation is not permitted
  */
-static int __devcgroup_check_permission(struct dev_cgroup *dev_cgroup,
-					short type, u32 major, u32 minor,
+static int __devcgroup_check_permission(short type, u32 major, u32 minor,
 					short access)
 {
+	struct dev_cgroup *dev_cgroup;
 	struct dev_exception_item ex;
 	int rc;
@@ -547,6 +583,7 @@ static int __devcgroup_check_permission(short type, u32 major, u32 minor,
 	ex.access = access;
 
 	rcu_read_lock();
+	dev_cgroup = task_devcgroup(current);
 	rc = may_access(dev_cgroup, &ex);
 	rcu_read_unlock();
@@ -558,7 +595,6 @@ static int __devcgroup_check_permission(short type, u32 major, u32 minor,
 
 int __devcgroup_inode_permission(struct inode *inode, int mask)
 {
-	struct dev_cgroup *dev_cgroup = task_devcgroup(current);
 	short type, access = 0;
 
 	if (S_ISBLK(inode->i_mode))
@@ -570,13 +606,12 @@ int __devcgroup_inode_permission(struct inode *inode, int mask)
 	if (mask & MAY_READ)
 		access |= ACC_READ;
 
-	return __devcgroup_check_permission(dev_cgroup, type, imajor(inode),
-					    iminor(inode), access);
+	return __devcgroup_check_permission(type, imajor(inode), iminor(inode),
+					    access);
 }
 
 int devcgroup_inode_mknod(int mode, dev_t dev)
 {
-	struct dev_cgroup *dev_cgroup = task_devcgroup(current);
 	short type;
 
 	if (!S_ISBLK(mode) && !S_ISCHR(mode))
@@ -587,7 +622,7 @@ int devcgroup_inode_mknod(int mode, dev_t dev)
 	else
 		type = DEV_CHAR;