Commit e041c683 authored by Alan Stern, committed by Linus Torvalds

[PATCH] Notifier chain update: API changes

The kernel's implementation of notifier chains is unsafe.  There is no
protection against entries being added to or removed from a chain while the
chain is in use.  The issues were discussed in this thread:

    http://marc.theaimsgroup.com/?l=linux-kernel&m=113018709002036&w=2

We noticed that notifier chains in the kernel fall into two basic usage
classes:

	"Blocking" chains are always called from a process context
	and the callout routines are allowed to sleep;

	"Atomic" chains can be called from an atomic context and
	the callout routines are not allowed to sleep.

We decided to codify this distinction and make it part of the API.  Therefore
this set of patches introduces three new, parallel APIs: one for blocking
notifiers, one for atomic notifiers, and one for "raw" notifiers (which is
really just the old API under a new name).  New kinds of data structures are
used for the heads of the chains, and new routines are defined for
registration, unregistration, and calling a chain.  The three APIs are
explained in include/linux/notifier.h and their implementation is in
kernel/sys.c.
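
For orientation, here is a minimal usage sketch of the three APIs.  The
chain, callback, and event values below are invented for illustration;
only the notifier.h macros and functions come from this patch set:

	#include <linux/notifier.h>

	/* One kind of chain head per usage class. */
	static ATOMIC_NOTIFIER_HEAD(example_atomic_chain);	/* callouts must not sleep */
	static BLOCKING_NOTIFIER_HEAD(example_blocking_chain);	/* callouts may sleep */
	static RAW_NOTIFIER_HEAD(example_raw_chain);		/* caller supplies locking */

	static int example_event(struct notifier_block *nb,
				 unsigned long action, void *data)
	{
		/* Handle the event; on an atomic chain this must not sleep. */
		return NOTIFY_OK;
	}

	static struct notifier_block example_nb = {
		.notifier_call = example_event,
	};

	static void example_usage(void)
	{
		/* Register, call, and unregister with the matching prefix. */
		atomic_notifier_chain_register(&example_atomic_chain, &example_nb);
		atomic_notifier_call_chain(&example_atomic_chain, 0, NULL);
		atomic_notifier_chain_unregister(&example_atomic_chain, &example_nb);
	}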

With atomic and blocking chains, the implementation guarantees that the chain
links will not be corrupted and that chain callers will not get messed up by
entries being added or removed.  For raw chains the implementation provides no
guarantees at all; users of this API must provide their own protections.  (The
idea was that situations may come up where the assumptions of the atomic and
blocking APIs are not appropriate, so it should be possible for users to
handle these things in their own way.)
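
As a purely hypothetical example of the raw style, such a user might wrap
the chain in a lock of its own choosing (the names here are made up):

	static RAW_NOTIFIER_HEAD(example_raw_chain);
	static DEFINE_SPINLOCK(example_raw_lock);

	static void example_raw_notify(unsigned long event, void *data)
	{
		/* The raw API does no locking; the caller serializes access. */
		spin_lock(&example_raw_lock);
		raw_notifier_call_chain(&example_raw_chain, event, data);
		spin_unlock(&example_raw_lock);
	}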

There are some limitations, which should not be too hard to live with.  For
atomic/blocking chains, registration and unregistration must always be done in
a process context since the chain is protected by a mutex/rwsem.  Also, a
callout routine for a non-raw chain must not try to register or unregister
entries on its own chain.  (This did happen in a couple of places and the code
had to be changed to avoid it.)
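
For instance, a callout like the following (hypothetical, reusing the
example_atomic_chain sketched above) is no longer permitted on an atomic or
blocking chain: the call path holds the chain's read-side protection while
the callout runs, so the unregistration could end up waiting on itself:

	/* Forbidden: a callout must not modify the chain it is running on. */
	static int bad_callout(struct notifier_block *nb,
			       unsigned long event, void *data)
	{
		atomic_notifier_chain_unregister(&example_atomic_chain, nb);
		return NOTIFY_OK;
	}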

Since atomic chains may be called from within an NMI handler, they cannot use
spinlocks for synchronization.  Instead we use RCU.  The overhead falls almost
entirely in the unregister routine, which is okay since unregistration is much
less frequent than calling a chain.
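
Roughly, the atomic call and unregister paths can be pictured like this
(a simplified sketch of the kernel/sys.c code, not the literal source):

	int atomic_notifier_call_chain(struct atomic_notifier_head *nh,
			unsigned long val, void *v)
	{
		int ret;

		rcu_read_lock();	/* readers never block or spin */
		ret = notifier_call_chain(&nh->head, val, v);
		rcu_read_unlock();
		return ret;
	}

	int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
			struct notifier_block *n)
	{
		unsigned long flags;
		int ret;

		/* Writers exclude each other; NMI-safe callers never take this. */
		spin_lock_irqsave(&nh->lock, flags);
		ret = notifier_chain_unregister(&nh->head, n);
		spin_unlock_irqrestore(&nh->lock, flags);
		synchronize_rcu();	/* this wait is where the cost falls */
		return ret;
	}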

Here is the list of chains that we adjusted and their classifications.  None
of them use the raw API, so for the moment it is only a placeholder.

  ATOMIC CHAINS
  -------------
arch/i386/kernel/traps.c:		i386die_chain
arch/ia64/kernel/traps.c:		ia64die_chain
arch/powerpc/kernel/traps.c:		powerpc_die_chain
arch/sparc64/kernel/traps.c:		sparc64die_chain
arch/x86_64/kernel/traps.c:		die_chain
drivers/char/ipmi/ipmi_si_intf.c:	xaction_notifier_list
kernel/panic.c:				panic_notifier_list
kernel/profile.c:			task_free_notifier
net/bluetooth/hci_core.c:		hci_notifier
net/ipv4/netfilter/ip_conntrack_core.c:	ip_conntrack_chain
net/ipv4/netfilter/ip_conntrack_core.c:	ip_conntrack_expect_chain
net/ipv6/addrconf.c:			inet6addr_chain
net/netfilter/nf_conntrack_core.c:	nf_conntrack_chain
net/netfilter/nf_conntrack_core.c:	nf_conntrack_expect_chain
net/netlink/af_netlink.c:		netlink_chain

  BLOCKING CHAINS
  ---------------
arch/powerpc/platforms/pseries/reconfig.c:	pSeries_reconfig_chain
arch/s390/kernel/process.c:		idle_chain
arch/x86_64/kernel/process.c:		idle_notifier
drivers/base/memory.c:			memory_chain
drivers/cpufreq/cpufreq.c:		cpufreq_policy_notifier_list
drivers/cpufreq/cpufreq.c:		cpufreq_transition_notifier_list
drivers/macintosh/adb.c:		adb_client_list
drivers/macintosh/via-pmu.c:		sleep_notifier_list
drivers/macintosh/via-pmu68k.c:		sleep_notifier_list
drivers/macintosh/windfarm_core.c:	wf_client_list
drivers/usb/core/notify.c:		usb_notifier_list
drivers/video/fbmem.c:			fb_notifier_list
kernel/cpu.c:				cpu_chain
kernel/module.c:			module_notify_list
kernel/profile.c:			munmap_notifier
kernel/profile.c:			task_exit_notifier
kernel/sys.c:				reboot_notifier_list
net/core/dev.c:				netdev_chain
net/decnet/dn_dev.c:			dnaddr_chain
net/ipv4/devinet.c:			inetaddr_chain

It's possible that some of these classifications are wrong.  If they are,
please let us know or submit a patch to fix them.  Note that any chain that
gets called very frequently should be atomic, because the rwsem read-locking
used for blocking chains is very likely to incur cache misses on SMP systems.
(However, if the chain's callout routines may sleep then the chain cannot be
atomic.)

The patch set was written by Alan Stern and Chandra Seetharaman, incorporating
material written by Keith Owens and suggestions from Paul McKenney and Andrew
Morton.

[jes@sgi.com: restructure the notifier chain initialization macros]
Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: Chandra Seetharaman <sekharan@us.ibm.com>
Signed-off-by: Jes Sorensen <jes@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 76b81e2b
@@ -43,7 +43,7 @@
 #include <asm/setup.h>
 #include <asm/io.h>
-extern struct notifier_block *panic_notifier_list;
+extern struct atomic_notifier_head panic_notifier_list;
 static int alpha_panic_event(struct notifier_block *, unsigned long, void *);
 static struct notifier_block alpha_panic_block = {
 	alpha_panic_event,
@@ -500,7 +500,8 @@ setup_arch(char **cmdline_p)
 	}
 	/* Register a call for panic conditions. */
-	notifier_chain_register(&panic_notifier_list, &alpha_panic_block);
+	atomic_notifier_chain_register(&panic_notifier_list,
+			&alpha_panic_block);
 #ifdef CONFIG_ALPHA_GENERIC
 	/* Assume that we've booted from SRM if we haven't booted from MILO.
......
@@ -141,7 +141,7 @@ static int __init netstar_late_init(void)
 	/* TODO: Setup front panel switch here */
 	/* Setup panic notifier */
-	notifier_chain_register(&panic_notifier_list, &panic_block);
+	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
 	return 0;
 }
......
@@ -235,7 +235,7 @@ static struct notifier_block panic_block = {
 static int __init voiceblue_setup(void)
 {
 	/* Setup panic notifier */
-	notifier_chain_register(&panic_notifier_list, &panic_block);
+	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
 	return 0;
 }
......
@@ -92,22 +92,21 @@ asmlinkage void spurious_interrupt_bug(void);
 asmlinkage void machine_check(void);
 static int kstack_depth_to_print = 24;
-struct notifier_block *i386die_chain;
-static DEFINE_SPINLOCK(die_notifier_lock);
+ATOMIC_NOTIFIER_HEAD(i386die_chain);
 int register_die_notifier(struct notifier_block *nb)
 {
-	int err = 0;
-	unsigned long flags;
 	vmalloc_sync_all();
-	spin_lock_irqsave(&die_notifier_lock, flags);
-	err = notifier_chain_register(&i386die_chain, nb);
-	spin_unlock_irqrestore(&die_notifier_lock, flags);
-	return err;
+	return atomic_notifier_chain_register(&i386die_chain, nb);
 }
 EXPORT_SYMBOL(register_die_notifier);
+int unregister_die_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_unregister(&i386die_chain, nb);
+}
+EXPORT_SYMBOL(unregister_die_notifier);
 static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
 {
 	return p > (void *)tinfo &&
......
@@ -30,19 +30,19 @@ extern spinlock_t timerlist_lock;
 fpswa_interface_t *fpswa_interface;
 EXPORT_SYMBOL(fpswa_interface);
-struct notifier_block *ia64die_chain;
+ATOMIC_NOTIFIER_HEAD(ia64die_chain);
 int
 register_die_notifier(struct notifier_block *nb)
 {
-	return notifier_chain_register(&ia64die_chain, nb);
+	return atomic_notifier_chain_register(&ia64die_chain, nb);
 }
 EXPORT_SYMBOL_GPL(register_die_notifier);
 int
 unregister_die_notifier(struct notifier_block *nb)
 {
-	return notifier_chain_unregister(&ia64die_chain, nb);
+	return atomic_notifier_chain_unregister(&ia64die_chain, nb);
 }
 EXPORT_SYMBOL_GPL(unregister_die_notifier);
......
@@ -165,7 +165,8 @@ void __init plat_setup(void)
 	/* Set up panic notifier */
 	for (i = 0; i < sizeof(lasat_panic_block) / sizeof(struct notifier_block); i++)
-		notifier_chain_register(&panic_notifier_list, &lasat_panic_block[i]);
+		atomic_notifier_chain_register(&panic_notifier_list,
+				&lasat_panic_block[i]);
 	lasat_reboot_setup();
......
@@ -238,7 +238,7 @@ static int __init reboot_setup(void)
 	request_irq(SGI_PANEL_IRQ, panel_int, 0, "Front Panel", NULL);
 	init_timer(&blink_timer);
 	blink_timer.function = blink_timeout;
-	notifier_chain_register(&panic_notifier_list, &panic_block);
+	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
 	return 0;
 }
......
@@ -193,7 +193,7 @@ static __init int ip32_reboot_setup(void)
 	init_timer(&blink_timer);
 	blink_timer.function = blink_timeout;
-	notifier_chain_register(&panic_notifier_list, &panic_block);
+	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
 	request_irq(MACEISA_RTC_IRQ, ip32_rtc_int, 0, "rtc", NULL);
......
@@ -150,7 +150,8 @@ void __init parisc_pdc_chassis_init(void)
 	if (handle) {
 		/* initialize panic notifier chain */
-		notifier_chain_register(&panic_notifier_list, &pdc_chassis_panic_block);
+		atomic_notifier_chain_register(&panic_notifier_list,
+				&pdc_chassis_panic_block);
 		/* initialize reboot notifier chain */
 		register_reboot_notifier(&pdc_chassis_reboot_block);
......
@@ -579,7 +579,8 @@ void __init setup_arch(char **cmdline_p)
 	panic_timeout = 180;
 	if (ppc_md.panic)
-		notifier_chain_register(&panic_notifier_list, &ppc64_panic_block);
+		atomic_notifier_chain_register(&panic_notifier_list,
+				&ppc64_panic_block);
 	init_mm.start_code = PAGE_OFFSET;
 	init_mm.end_code = (unsigned long) _etext;
......
@@ -74,19 +74,19 @@ EXPORT_SYMBOL(__debugger_dabr_match);
 EXPORT_SYMBOL(__debugger_fault_handler);
 #endif
-struct notifier_block *powerpc_die_chain;
-static DEFINE_SPINLOCK(die_notifier_lock);
+ATOMIC_NOTIFIER_HEAD(powerpc_die_chain);
 int register_die_notifier(struct notifier_block *nb)
 {
-	int err = 0;
-	unsigned long flags;
-	spin_lock_irqsave(&die_notifier_lock, flags);
-	err = notifier_chain_register(&powerpc_die_chain, nb);
-	spin_unlock_irqrestore(&die_notifier_lock, flags);
-	return err;
+	return atomic_notifier_chain_register(&powerpc_die_chain, nb);
 }
 EXPORT_SYMBOL(register_die_notifier);
+int unregister_die_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_unregister(&powerpc_die_chain, nb);
+}
+EXPORT_SYMBOL(unregister_die_notifier);
 /*
  * Trap & Exception support
......
@@ -94,16 +94,16 @@ static struct device_node *derive_parent(const char *path)
 	return parent;
 }
-static struct notifier_block *pSeries_reconfig_chain;
+static BLOCKING_NOTIFIER_HEAD(pSeries_reconfig_chain);
 int pSeries_reconfig_notifier_register(struct notifier_block *nb)
 {
-	return notifier_chain_register(&pSeries_reconfig_chain, nb);
+	return blocking_notifier_chain_register(&pSeries_reconfig_chain, nb);
 }
 void pSeries_reconfig_notifier_unregister(struct notifier_block *nb)
 {
-	notifier_chain_unregister(&pSeries_reconfig_chain, nb);
+	blocking_notifier_chain_unregister(&pSeries_reconfig_chain, nb);
 }
 static int pSeries_reconfig_add_node(const char *path, struct property *proplist)
@@ -131,7 +131,7 @@ static int pSeries_reconfig_add_node(const char *path, struct property *proplist
 		goto out_err;
 	}
-	err = notifier_call_chain(&pSeries_reconfig_chain,
+	err = blocking_notifier_call_chain(&pSeries_reconfig_chain,
 				  PSERIES_RECONFIG_ADD, np);
 	if (err == NOTIFY_BAD) {
 		printk(KERN_ERR "Failed to add device node %s\n", path);
@@ -171,7 +171,7 @@ static int pSeries_reconfig_remove_node(struct device_node *np)
 	remove_node_proc_entries(np);
-	notifier_call_chain(&pSeries_reconfig_chain,
+	blocking_notifier_call_chain(&pSeries_reconfig_chain,
 			    PSERIES_RECONFIG_REMOVE, np);
 	of_detach_node(np);
......
@@ -736,7 +736,8 @@ ibm_statusled_progress(char *s, unsigned short hex)
 		hex = 0xfff;
 	if (!notifier_installed) {
 		++notifier_installed;
-		notifier_chain_register(&panic_notifier_list,
+		atomic_notifier_chain_register(&panic_notifier_list,
 				&ibm_statusled_block);
 	}
 }
......
@@ -76,17 +76,17 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
 /*
  * Need to know about CPUs going idle?
  */
-static struct notifier_block *idle_chain;
+static ATOMIC_NOTIFIER_HEAD(idle_chain);
 int register_idle_notifier(struct notifier_block *nb)
 {
-	return notifier_chain_register(&idle_chain, nb);
+	return atomic_notifier_chain_register(&idle_chain, nb);
 }
 EXPORT_SYMBOL(register_idle_notifier);
 int unregister_idle_notifier(struct notifier_block *nb)
 {
-	return notifier_chain_unregister(&idle_chain, nb);
+	return atomic_notifier_chain_unregister(&idle_chain, nb);
 }
 EXPORT_SYMBOL(unregister_idle_notifier);
@@ -95,7 +95,7 @@ void do_monitor_call(struct pt_regs *regs, long interruption_code)
 	/* disable monitor call class 0 */
 	__ctl_clear_bit(8, 15);
-	notifier_call_chain(&idle_chain, CPU_NOT_IDLE,
+	atomic_notifier_call_chain(&idle_chain, CPU_NOT_IDLE,
 			    (void *)(long) smp_processor_id());
 }
@@ -116,7 +116,8 @@ static void default_idle(void)
 		return;
 	}
-	rc = notifier_call_chain(&idle_chain, CPU_IDLE, (void *)(long) cpu);
+	rc = atomic_notifier_call_chain(&idle_chain,
+					CPU_IDLE, (void *)(long) cpu);
 	if (rc != NOTIFY_OK && rc != NOTIFY_DONE)
 		BUG();
 	if (rc != NOTIFY_OK) {
......
@@ -43,18 +43,19 @@
 #include <linux/kmod.h>
 #endif
-struct notifier_block *sparc64die_chain;
-static DEFINE_SPINLOCK(die_notifier_lock);
+ATOMIC_NOTIFIER_HEAD(sparc64die_chain);
 int register_die_notifier(struct notifier_block *nb)
 {
-	int err = 0;
-	unsigned long flags;
-	spin_lock_irqsave(&die_notifier_lock, flags);
-	err = notifier_chain_register(&sparc64die_chain, nb);
-	spin_unlock_irqrestore(&die_notifier_lock, flags);
-	return err;
+	return atomic_notifier_chain_register(&sparc64die_chain, nb);
 }
 EXPORT_SYMBOL(register_die_notifier);
+int unregister_die_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_unregister(&sparc64die_chain, nb);
+}
+EXPORT_SYMBOL(unregister_die_notifier);
 /* When an irrecoverable trap occurs at tl > 0, the trap entry
  * code logs the trap state registers at every level in the trap
......
@@ -762,7 +762,8 @@ static struct notifier_block panic_exit_notifier = {
 static int add_notifier(void)
 {
-	notifier_chain_register(&panic_notifier_list, &panic_exit_notifier);
+	atomic_notifier_chain_register(&panic_notifier_list,
+			&panic_exit_notifier);
 	return(0);
 }
......
@@ -477,7 +477,8 @@ static struct notifier_block panic_exit_notifier = {
 void __init setup_arch(char **cmdline_p)
 {
-	notifier_chain_register(&panic_notifier_list, &panic_exit_notifier);
+	atomic_notifier_chain_register(&panic_notifier_list,
+			&panic_exit_notifier);
 	paging_init();
 	strlcpy(saved_command_line, command_line, COMMAND_LINE_SIZE);
 	*cmdline_p = command_line;
......
@@ -66,24 +66,17 @@ EXPORT_SYMBOL(boot_option_idle_override);
 void (*pm_idle)(void);
 static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
-static struct notifier_block *idle_notifier;
-static DEFINE_SPINLOCK(idle_notifier_lock);
+static ATOMIC_NOTIFIER_HEAD(idle_notifier);
 void idle_notifier_register(struct notifier_block *n)
 {
-	unsigned long flags;
-	spin_lock_irqsave(&idle_notifier_lock, flags);
-	notifier_chain_register(&idle_notifier, n);
-	spin_unlock_irqrestore(&idle_notifier_lock, flags);
+	atomic_notifier_chain_register(&idle_notifier, n);
 }
 EXPORT_SYMBOL_GPL(idle_notifier_register);
 void idle_notifier_unregister(struct notifier_block *n)
 {
-	unsigned long flags;
-	spin_lock_irqsave(&idle_notifier_lock, flags);
-	notifier_chain_unregister(&idle_notifier, n);
-	spin_unlock_irqrestore(&idle_notifier_lock, flags);
+	atomic_notifier_chain_unregister(&idle_notifier, n);
 }
 EXPORT_SYMBOL(idle_notifier_unregister);
@@ -93,13 +86,13 @@ static DEFINE_PER_CPU(enum idle_state, idle_state) = CPU_NOT_IDLE;
 void enter_idle(void)
 {
 	__get_cpu_var(idle_state) = CPU_IDLE;
-	notifier_call_chain(&idle_notifier, IDLE_START, NULL);
+	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
 }
 static void __exit_idle(void)
 {
 	__get_cpu_var(idle_state) = CPU_NOT_IDLE;
-	notifier_call_chain(&idle_notifier, IDLE_END, NULL);
+	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
 }
 /* Called from interrupts to signify idle end */
......
@@ -69,20 +69,20 @@ asmlinkage void alignment_check(void);
 asmlinkage void machine_check(void);
 asmlinkage void spurious_interrupt_bug(void);
-struct notifier_block *die_chain;
-static DEFINE_SPINLOCK(die_notifier_lock);
+ATOMIC_NOTIFIER_HEAD(die_chain);
 int register_die_notifier(struct notifier_block *nb)
 {
-	int err = 0;
-	unsigned long flags;
 	vmalloc_sync_all();
-	spin_lock_irqsave(&die_notifier_lock, flags);
-	err = notifier_chain_register(&die_chain, nb);
-	spin_unlock_irqrestore(&die_notifier_lock, flags);
-	return err;
+	return atomic_notifier_chain_register(&die_chain, nb);
 }
 EXPORT_SYMBOL(register_die_notifier);
+int unregister_die_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_unregister(&die_chain, nb);
+}
+EXPORT_SYMBOL(unregister_die_notifier);
 static inline void conditional_sti(struct pt_regs *regs)
 {
......
@@ -108,5 +108,5 @@ static struct notifier_block iss_panic_block = {
 void __init platform_setup(char **p_cmdline)
 {
-	notifier_chain_register(&panic_notifier_list, &iss_panic_block);
+	atomic_notifier_chain_register(&panic_notifier_list, &iss_panic_block);
 }
@@ -47,16 +47,16 @@ static struct kset_uevent_ops memory_uevent_ops = {
 	.uevent = memory_uevent,
 };
-static struct notifier_block *memory_chain;
+static BLOCKING_NOTIFIER_HEAD(memory_chain);
 int register_memory_notifier(struct notifier_block *nb)
 {
-	return notifier_chain_register(&memory_chain, nb);
+	return blocking_notifier_chain_register(&memory_chain, nb);
 }
 void unregister_memory_notifier(struct notifier_block *nb)
 {
-	notifier_chain_unregister(&memory_chain, nb);
+	blocking_notifier_chain_unregister(&memory_chain, nb);
 }
 /*
@@ -140,7 +140,7 @@ static ssize_t show_mem_state(struct sys_device *dev, char *buf)
 static inline int memory_notify(unsigned long val, void *v)
 {
-	return notifier_call_chain(&memory_chain, val, v);
+	return blocking_notifier_call_chain(&memory_chain, val, v);
 }
 /*
......
@@ -3744,7 +3744,7 @@ static int ipmi_init_msghandler(void)
 	ipmi_timer.expires = jiffies + IPMI_TIMEOUT_JIFFIES;
 	add_timer(&ipmi_timer);
-	notifier_chain_register(&panic_notifier_list, &panic_block);
+	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
 	initialized = 1;
@@ -3764,7 +3764,7 @@ static __exit void cleanup_ipmi(void)
 	if (!initialized)
 		return;
-	notifier_chain_unregister(&panic_notifier_list, &panic_block);
+	atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block);
 	/* This can't be called if any interfaces exist, so no worry about
 	   shutting down the interfaces. */
......
@@ -237,10 +237,10 @@ struct smi_info
 static int try_smi_init(struct smi_info *smi);
-static struct notifier_block *xaction_notifier_list;
+static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
 static int register_xaction_notifier(struct notifier_block * nb)
 {
-	return notifier_chain_register(&xaction_notifier_list, nb);
+	return atomic_notifier_chain_register(&xaction_notifier_list, nb);
 }
 static void si_restart_short_timer(struct smi_info *smi_info);
@@ -302,7 +302,8 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
 		do_gettimeofday(&t);
 		printk("**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
 #endif
-		err = notifier_call_chain(&xaction_notifier_list, 0, smi_info);
+		err = atomic_notifier_call_chain(&xaction_notifier_list,
+				0, smi_info);
 		if (err & NOTIFY_STOP_MASK) {
 			rv = SI_SM_CALL_WITHOUT_DELAY;
 			goto out;
......
@@ -1158,7 +1158,8 @@ static int __init ipmi_wdog_init(void)
 	}
 	register_reboot_notifier(&wdog_reboot_notifier);
-	notifier_chain_register(&panic_notifier_list, &wdog_panic_notifier);
+	atomic_notifier_chain_register(&panic_notifier_list,
+			&wdog_panic_notifier);
 	printk(KERN_INFO PFX "driver initialized\n");
@@ -1176,7 +1177,8 @@ static __exit void ipmi_unregister_watchdog(void)
 	release_nmi(&ipmi_nmi_handler);
 #endif
-	notifier_chain_unregister(&panic_notifier_list, &wdog_panic_notifier);
+	atomic_notifier_chain_unregister(&panic_notifier_list,
+			&wdog_panic_notifier);
 	unregister_reboot_notifier(&wdog_reboot_notifier);
 	if (! watchdog_user)
......
@@ -52,9 +52,8 @@ static void handle_update(void *data);
  * changes to devices when the CPU clock speed changes.
  * The mutex locks both lists.
  */
-static struct notifier_block *cpufreq_policy_notifier_list;
-static struct notifier_block *cpufreq_transition_notifier_list;
-static DECLARE_RWSEM (cpufreq_notifier_rwsem);
+static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
+static BLOCKING_NOTIFIER_HEAD(cpufreq_transition_notifier_list);
 static LIST_HEAD(cpufreq_governor_list);
@@ -247,8 +246,6 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
 	dprintk("notification %u of frequency transition to %u kHz\n",
 		state, freqs->new);
-	down_read(&cpufreq_notifier_rwsem);
 	policy = cpufreq_cpu_data[freqs->cpu];
 	switch (state) {
@@ -266,20 +263,19 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
 				freqs->old = policy->cur;
 			}
 		}
-		notifier_call_chain(&cpufreq_transition_notifier_list,
-				CPUFREQ_PRECHANGE, freqs);
+		blocking_notifier_call_chain(&cpufreq_transition_notifier_list,
+				CPUFREQ_PRECHANGE, freqs);
 		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
 		break;
 	case CPUFREQ_POSTCHANGE:
 		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
-		notifier_call_chain(&cpufreq_transition_notifier_list,
-				CPUFREQ_POSTCHANGE, freqs);
+		blocking_notifier_call_chain(&cpufreq_transition_notifier_list,
+				CPUFREQ_POSTCHANGE, freqs);
 		if (likely(policy) && likely(policy->cpu == freqs->cpu))
 			policy->cur = freqs->new;
 		break;
 	}
-	up_read(&cpufreq_notifier_rwsem);
 }
 EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
@@ -1007,7 +1003,7 @@ static int cpufreq_suspend(struct sys_device * sysdev, pm_message_t pmsg)
 		freqs.old = cpu_policy->cur;
 		freqs.new = cur_freq;
-		notifier_call_chain(&cpufreq_transition_notifier_list,
+		blocking_notifier_call_chain(&cpufreq_transition_notifier_list,
 				    CPUFREQ_SUSPENDCHANGE, &freqs);
 		adjust_jiffies(CPUFREQ_SUSPENDCHANGE, &freqs);
@@ -1088,7 +1084,8 @@ static int cpufreq_resume(struct sys_device * sysdev)
 		freqs.old = cpu_policy->cur;
 		freqs.new = cur_freq;
-		notifier_call_chain(&cpufreq_transition_notifier_list,
+		blocking_notifier_call_chain(
+				&cpufreq_transition_notifier_list,
 				    CPUFREQ_RESUMECHANGE, &freqs);
 		adjust_jiffies(CPUFREQ_RESUMECHANGE, &freqs);
@@ -1125,24 +1122,24 @@ static struct sysdev_driver cpufreq_sysdev_driver = {
  *	changes in cpufreq policy.
  *
  *	This function may sleep, and has the same return conditions as
- *	notifier_chain_register.
+ *	blocking_notifier_chain_register.
  */
 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
 {
 	int ret;
-	down_write(&cpufreq_notifier_rwsem);
 	switch (list) {
 	case CPUFREQ_TRANSITION_NOTIFIER:
-		ret = notifier_chain_register(&cpufreq_transition_notifier_list, nb);
+		ret = blocking_notifier_chain_register(
+				&cpufreq_transition_notifier_list, nb);
 		break;
 	case CPUFREQ_POLICY_NOTIFIER:
-		ret = notifier_chain_register(&cpufreq_policy_notifier_list, nb);
+		ret = blocking_notifier_chain_register(
+				&cpufreq_policy_notifier_list, nb);
 		break;
 	default:
 		ret = -EINVAL;
 	}
-	up_write(&cpufreq_notifier_rwsem);
 	return ret;
 }
@@ -1157,24 +1154,24 @@ EXPORT_SYMBOL(cpufreq_register_notifier);
  *	Remove a driver from the CPU frequency notifier list.
  *
  *	This function may sleep, and has the same return conditions as
- *	notifier_chain_unregister.
+ *	blocking_notifier_chain_unregister.
 */
 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
 {
 	int ret;
-	down_write(&cpufreq_notifier_rwsem);
 	switch (list) {
 	case CPUFREQ_TRANSITION_NOTIFIER:
-		ret = notifier_chain_unregister(&cpufreq_transition_notifier_list, nb);
+		ret = blocking_notifier_chain_unregister(
+				&cpufreq_transition_notifier_list, nb);
 		break;
 	case CPUFREQ_POLICY_NOTIFIER:
-		ret = notifier_chain_unregister(&cpufreq_policy_notifier_list, nb);
+		ret = blocking_notifier_chain_unregister(
+				&cpufreq_policy_notifier_list, nb);
 		break;
 	default:
 		ret = -EINVAL;
 	}
-	up_write(&cpufreq_notifier_rwsem);
 	return ret;
 }
@@ -1346,29 +1343,23 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_poli
 	if (ret)
 		goto error_out;
-	down_read(&cpufreq_notifier_rwsem);
 	/* adjust if necessary - all reasons */
-	notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_ADJUST,
-			policy);
+	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+			CPUFREQ_ADJUST, policy);
 	/* adjust if necessary - hardware incompatibility*/
-	notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_INCOMPATIBLE,
-			policy);
+	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+			CPUFREQ_INCOMPATIBLE, policy);
 	/* verify the cpu speed can be set within this limit,
 	   which might be different to the first one */
 	ret = cpufreq_driver->verify(policy);
-	if (ret) {
-		up_read(&cpufreq_notifier_rwsem);
+	if (ret)
 		goto error_out;
-	}
 	/* notification of the new policy */
-	notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_NOTIFY,
-			policy);
-	up_read(&cpufreq_notifier_rwsem);
+	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
			CPUFREQ_NOTIFY, policy);
 	data->min = policy->min;
 	data->max = policy->max;
......
@@ -484,26 +484,15 @@ static void dcdbas_host_control(void)
 static int dcdbas_reboot_notify(struct notifier_block *nb, unsigned long code,
				void *unused)
 {
-	static unsigned int notify_cnt = 0;
 	switch (code) {
 	case SYS_DOWN:
 	case SYS_HALT:
 	case SYS_POWER_OFF:
 		if (host_control_on_shutdown) {
 			/* firmware is going to perform host control action */
-			if (++notify_cnt == 2) {
-				printk(KERN_WARNING
-				       "Please wait for shutdown "
-				       "action to complete...\n");
-				dcdbas_host_control();
-			}
-			/*
-			 * register again and initiate the host control
-			 * action on the second notification to allow
-			 * everyone that registered to be notified
-			 */
-			register_reboot_notifier(nb);
+			printk(KERN_WARNING "Please wait for shutdown "
+			       "action to complete...\n");
+			dcdbas_host_control();
 		}
 		break;
 	}
@@ -514,7 +503,7 @@ static int dcdbas_reboot_notify(struct notifier_block *nb, unsigned long code,
 static struct notifier_block dcdbas_reboot_nb = {
 	.notifier_call = dcdbas_reboot_notify,
 	.next = NULL,
-	.priority = 0
+	.priority = INT_MIN
 };
 static DCDBAS_BIN_ATTR_RW(smi_data);
......
@@ -80,7 +80,7 @@ static struct adb_driver *adb_driver_list[] = {
 static struct class *adb_dev_class;
 struct adb_driver *adb_controller;
-struct notifier_block *adb_client_list = NULL;
+BLOCKING_NOTIFIER_HEAD(adb_client_list);
 static int adb_got_sleep;
 static int adb_inited;
 static pid_t adb_probe_task_pid;
@@ -354,7 +354,8 @@ adb_notify_sleep(struct pmu_sleep_notifier *self, int when)
 		/* Stop autopoll */
 		if (adb_controller->autopoll)
 			adb_controller->autopoll(0);
-		ret = notifier_call_chain(&adb_client_list, ADB_MSG_POWERDOWN, NULL);
+		ret = blocking_notifier_call_chain(&adb_client_list,
+				ADB_MSG_POWERDOWN, NULL);
 		if (ret & NOTIFY_STOP_MASK) {
 			up(&adb_probe_mutex);
 			return PBOOK_SLEEP_REFUSE;
@@ -391,7 +392,8 @@ do_adb_reset_bus(void)
 	if (adb_controller->autopoll)
 		adb_controller->autopoll(0);
-	nret = notifier_call_chain(&adb_client_list, ADB_MSG_PRE_RESET, NULL);
+	nret = blocking_notifier_call_chain(&adb_client_list,
+			ADB_MSG_PRE_RESET, NULL);
 	if (nret & NOTIFY_STOP_MASK) {
 		if (adb_controller