Commit a6408f6c authored by Linus Torvalds

Merge branch 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull smp hotplug updates from Thomas Gleixner:
 "This is the next part of the hotplug rework.

   - Convert all notifiers with a priority assigned

   - Convert all CPU_STARTING/DYING notifiers

     The final removal of the STARTING/DYING infrastructure will happen
     when the merge window closes.

  Another 700 lines of impenetrable maze gone :)"
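
The conversion pattern is the same throughout the series: a multiplexed
notifier that decodes an action code is replaced by a symmetric pair of
per-CPU callbacks registered once against a named hotplug state. Below is
a minimal sketch of that pattern. cpuhp_setup_state() and the
CPUHP_AP_ONLINE_DYN state are the real <linux/cpuhotplug.h> API; the "foo"
driver, its callbacks, and the use of a dynamic state are illustrative
assumptions (the conversions in this series instead add fixed
CPUHP_AP_*_STARTING entries to enum cpuhp_state, whose position in the
enum replaces the old notifier priorities):

#include <linux/cpuhotplug.h>
#include <linux/init.h>

/* Called on each CPU as it comes up; replaces the CPU_STARTING/CPU_ONLINE
 * notifier actions. Returning a negative errno fails that CPU's bringup. */
static int foo_starting_cpu(unsigned int cpu)
{
	/* enable this CPU's per-CPU resources, e.g. a per-CPU IRQ */
	return 0;
}

/* Called on each CPU as it goes down; replaces CPU_DYING/CPU_DOWN_PREPARE. */
static int foo_dying_cpu(unsigned int cpu)
{
	/* disable this CPU's per-CPU resources */
	return 0;
}

static int __init foo_init(void)
{
	int ret;

	/*
	 * Registers both callbacks and also invokes foo_starting_cpu() on
	 * every CPU already online; the _nocalls() variant used by several
	 * conversions below skips those initial calls. For *_DYN states the
	 * allocated state number (> 0) is returned on success.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "foo:online",
				foo_starting_cpu, foo_dying_cpu);
	return ret < 0 ? ret : 0;
}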

* 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (70 commits)
  timers/core: Correct callback order during CPU hot plug
  leds/trigger/cpu: Move from CPU_STARTING to ONLINE level
  powerpc/numa: Convert to hotplug state machine
  arm/perf: Fix hotplug state machine conversion
  irqchip/armada: Avoid unused function warnings
  ARC/time: Convert to hotplug state machine
  clocksource/atlas7: Convert to hotplug state machine
  clocksource/armada-370-xp: Convert to hotplug state machine
  clocksource/exynos_mct: Convert to hotplug state machine
  clocksource/arm_global_timer: Convert to hotplug state machine
  rcu: Convert rcutree to hotplug state machine
  KVM/arm/arm64/vgic-new: Convert to hotplug state machine
  smp/cfd: Convert core to hotplug state machine
  x86/x2apic: Convert to CPU hotplug state machine
  profile: Convert to hotplug state machine
  timers/core: Convert to hotplug state machine
  hrtimer: Convert to hotplug state machine
  x86/tboot: Convert to hotplug state machine
  arm64/armv8 deprecated: Convert to hotplug state machine
  hwtracing/coresight-etm4x: Convert to hotplug state machine
  ...
parents 1a81a8f2 4fae16df
@@ -296,30 +296,23 @@ static irqreturn_t timer_irq_handler(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static int arc_timer_cpu_notify(struct notifier_block *self,
-				unsigned long action, void *hcpu)
+static int arc_timer_starting_cpu(unsigned int cpu)
 {
 	struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
 
 	evt->cpumask = cpumask_of(smp_processor_id());
 
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_STARTING:
-		clockevents_config_and_register(evt, arc_timer_freq,
-						0, ULONG_MAX);
-		enable_percpu_irq(arc_timer_irq, 0);
-		break;
-	case CPU_DYING:
-		disable_percpu_irq(arc_timer_irq);
-		break;
-	}
-
-	return NOTIFY_OK;
+	clockevents_config_and_register(evt, arc_timer_freq, 0, ARC_TIMER_MAX);
+	enable_percpu_irq(arc_timer_irq, 0);
+	return 0;
 }
 
-static struct notifier_block arc_timer_cpu_nb = {
-	.notifier_call = arc_timer_cpu_notify,
-};
+static int arc_timer_dying_cpu(unsigned int cpu)
+{
+	disable_percpu_irq(arc_timer_irq);
+	return 0;
+}
 
 /*
  * clockevent setup for boot CPU
@@ -329,12 +322,6 @@ static int __init arc_clockevent_setup(struct device_node *node)
 	struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
 	int ret;
 
-	ret = register_cpu_notifier(&arc_timer_cpu_nb);
-	if (ret) {
-		pr_err("Failed to register cpu notifier");
-		return ret;
-	}
-
 	arc_timer_irq = irq_of_parse_and_map(node, 0);
 	if (arc_timer_irq <= 0) {
 		pr_err("clockevent: missing irq");
@@ -347,11 +334,6 @@ static int __init arc_clockevent_setup(struct device_node *node)
 		return ret;
 	}
 
-	evt->irq = arc_timer_irq;
-	evt->cpumask = cpumask_of(smp_processor_id());
-	clockevents_config_and_register(evt, arc_timer_freq,
-					0, ARC_TIMER_MAX);
-
 	/* Needs apriori irq_set_percpu_devid() done in intc map function */
 	ret = request_percpu_irq(arc_timer_irq, timer_irq_handler,
 				 "Timer0 (per-cpu-tick)", evt);
@@ -360,8 +342,14 @@ static int __init arc_clockevent_setup(struct device_node *node)
 		return ret;
 	}
 
-	enable_percpu_irq(arc_timer_irq, 0);
+	ret = cpuhp_setup_state(CPUHP_AP_ARC_TIMER_STARTING,
+				"AP_ARC_TIMER_STARTING",
+				arc_timer_starting_cpu,
+				arc_timer_dying_cpu);
+	if (ret) {
+		pr_err("Failed to setup hotplug state");
+		return ret;
+	}
 	return 0;
 }
@@ -310,24 +310,17 @@ static void twd_timer_setup(void)
 	enable_percpu_irq(clk->irq, 0);
 }
 
-static int twd_timer_cpu_notify(struct notifier_block *self,
-				unsigned long action, void *hcpu)
+static int twd_timer_starting_cpu(unsigned int cpu)
 {
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_STARTING:
-		twd_timer_setup();
-		break;
-	case CPU_DYING:
-		twd_timer_stop();
-		break;
-	}
-
-	return NOTIFY_OK;
+	twd_timer_setup();
+	return 0;
 }
 
-static struct notifier_block twd_timer_cpu_nb = {
-	.notifier_call = twd_timer_cpu_notify,
-};
+static int twd_timer_dying_cpu(unsigned int cpu)
+{
+	twd_timer_stop();
+	return 0;
+}
 
 static int __init twd_local_timer_common_register(struct device_node *np)
 {
@@ -345,9 +338,9 @@ static int __init twd_local_timer_common_register(struct device_node *np)
 		goto out_free;
 	}
 
-	err = register_cpu_notifier(&twd_timer_cpu_nb);
-	if (err)
-		goto out_irq;
+	cpuhp_setup_state_nocalls(CPUHP_AP_ARM_TWD_STARTING,
+				  "AP_ARM_TWD_STARTING",
+				  twd_timer_starting_cpu, twd_timer_dying_cpu);
 
 	twd_get_clock(np);
 	if (!of_property_read_bool(np, "always-on"))
@@ -365,8 +358,6 @@ static int __init twd_local_timer_common_register(struct device_node *np)
 
 	return 0;
 
-out_irq:
-	free_percpu_irq(twd_ppi, twd_evt);
 out_free:
 	iounmap(twd_base);
 	twd_base = NULL;
@@ -111,20 +111,12 @@ static struct notifier_block mvebu_hwcc_pci_nb __maybe_unused = {
 	.notifier_call = mvebu_hwcc_notifier,
 };
 
-static int armada_xp_clear_shared_l2_notifier_func(struct notifier_block *nfb,
-						   unsigned long action, void *hcpu)
+static int armada_xp_clear_l2_starting(unsigned int cpu)
 {
-	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
-		armada_xp_clear_shared_l2();
-
-	return NOTIFY_OK;
+	armada_xp_clear_shared_l2();
+	return 0;
 }
 
-static struct notifier_block armada_xp_clear_shared_l2_notifier = {
-	.notifier_call = armada_xp_clear_shared_l2_notifier_func,
-	.priority = 100,
-};
-
 static void __init armada_370_coherency_init(struct device_node *np)
 {
 	struct resource res;
@@ -155,8 +147,9 @@ static void __init armada_370_coherency_init(struct device_node *np)
 
 	of_node_put(cpu_config_np);
 
-	register_cpu_notifier(&armada_xp_clear_shared_l2_notifier);
-
+	cpuhp_setup_state_nocalls(CPUHP_AP_ARM_MVEBU_COHERENCY,
+				  "AP_ARM_MVEBU_COHERENCY",
+				  armada_xp_clear_l2_starting, NULL);
 exit:
 	set_cpu_coherent();
 }
@@ -597,17 +597,16 @@ static void l2c310_configure(void __iomem *base)
 			       L310_POWER_CTRL);
 }
 
-static int l2c310_cpu_enable_flz(struct notifier_block *nb, unsigned long act, void *data)
+static int l2c310_starting_cpu(unsigned int cpu)
 {
-	switch (act & ~CPU_TASKS_FROZEN) {
-	case CPU_STARTING:
-		set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
-		break;
-	case CPU_DYING:
-		set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1)));
-		break;
-	}
-	return NOTIFY_OK;
+	set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
+	return 0;
+}
+
+static int l2c310_dying_cpu(unsigned int cpu)
+{
+	set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1)));
+	return 0;
 }
 
 static void __init l2c310_enable(void __iomem *base, unsigned num_lock)
@@ -678,10 +677,10 @@ static void __init l2c310_enable(void __iomem *base, unsigned num_lock)
 			power_ctrl & L310_STNDBY_MODE_EN ? "en" : "dis");
 	}
 
-	if (aux & L310_AUX_CTRL_FULL_LINE_ZERO) {
-		set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
-		cpu_notifier(l2c310_cpu_enable_flz, 0);
-	}
+	if (aux & L310_AUX_CTRL_FULL_LINE_ZERO)
+		cpuhp_setup_state(CPUHP_AP_ARM_L2X0_STARTING,
+				  "AP_ARM_L2X0_STARTING", l2c310_starting_cpu,
+				  l2c310_dying_cpu);
 }
 
 static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
@@ -643,19 +643,19 @@ int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
  * hardware state at every thread switch.  We clear our held state when
  * a CPU has been killed, indicating that the VFP hardware doesn't contain
  * a threads VFP state.  When a CPU starts up, we re-enable access to the
- * VFP hardware.
- *
- * Both CPU_DYING and CPU_STARTING are called on the CPU which
+ * VFP hardware. The callbacks below are called on the CPU which
  * is being offlined/onlined.
  */
-static int vfp_hotplug(struct notifier_block *b, unsigned long action,
-	void *hcpu)
+static int vfp_dying_cpu(unsigned int cpu)
 {
-	if (action == CPU_DYING || action == CPU_DYING_FROZEN)
-		vfp_current_hw_state[(long)hcpu] = NULL;
-	else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
-		vfp_enable(NULL);
-	return NOTIFY_OK;
+	vfp_force_reload(cpu, current_thread_info());
+	return 0;
+}
+
+static int vfp_starting_cpu(unsigned int unused)
+{
+	vfp_enable(NULL);
+	return 0;
 }
 
 void vfp_kmode_exception(void)
@@ -732,6 +732,10 @@ static int __init vfp_init(void)
 	unsigned int vfpsid;
 	unsigned int cpu_arch = cpu_architecture();
 
+	/*
+	 * Enable the access to the VFP on all online CPUs so the
+	 * following test on FPSID will succeed.
+	 */
 	if (cpu_arch >= CPU_ARCH_ARMv6)
 		on_each_cpu(vfp_enable, NULL, 1);
@@ -794,7 +798,9 @@ static int __init vfp_init(void)
 		VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;
 	}
 
-	hotcpu_notifier(vfp_hotplug, 0);
+	cpuhp_setup_state_nocalls(CPUHP_AP_ARM_VFP_STARTING,
+				  "AP_ARM_VFP_STARTING", vfp_starting_cpu,
+				  vfp_dying_cpu);
 
 	vfp_vector = vfp_support_entry;
@@ -153,12 +153,11 @@ static struct notifier_block xen_pvclock_gtod_notifier = {
 	.notifier_call = xen_pvclock_gtod_notify,
 };
 
-static void xen_percpu_init(void)
+static int xen_starting_cpu(unsigned int cpu)
 {
 	struct vcpu_register_vcpu_info info;
 	struct vcpu_info *vcpup;
 	int err;
-	int cpu = get_cpu();
 
 	/*
 	 * VCPUOP_register_vcpu_info cannot be called twice for the same
@@ -186,7 +185,13 @@ static void xen_percpu_init(void)
 
 after_register_vcpu_info:
 	enable_percpu_irq(xen_events_irq, 0);
-	put_cpu();
+	return 0;
+}
+
+static int xen_dying_cpu(unsigned int cpu)
+{
+	disable_percpu_irq(xen_events_irq);
+	return 0;
 }
 
 static void xen_restart(enum reboot_mode reboot_mode, const char *cmd)
@@ -205,28 +210,6 @@ static void xen_power_off(void)
 	BUG_ON(rc);
 }
 
-static int xen_cpu_notification(struct notifier_block *self,
-				unsigned long action,
-				void *hcpu)
-{
-	switch (action) {
-	case CPU_STARTING:
-		xen_percpu_init();
-		break;
-	case CPU_DYING:
-		disable_percpu_irq(xen_events_irq);
-		break;
-	default:
-		break;
-	}
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block xen_cpu_notifier = {
-	.notifier_call = xen_cpu_notification,
-};
-
 static irqreturn_t xen_arm_callback(int irq, void *arg)
 {
 	xen_hvm_evtchn_do_upcall();
@@ -425,16 +408,14 @@ static int __init xen_guest_init(void)
 		return -EINVAL;
 	}
 
-	xen_percpu_init();
-
-	register_cpu_notifier(&xen_cpu_notifier);
-
 	xen_time_setup_guest();
 
 	if (xen_initial_domain())
 		pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier);
 
-	return 0;
+	return cpuhp_setup_state(CPUHP_AP_ARM_XEN_STARTING,
+				 "AP_ARM_XEN_STARTING", xen_starting_cpu,
+				 xen_dying_cpu);
 }
 early_initcall(xen_guest_init);
@@ -121,7 +121,7 @@ static int run_all_cpu_set_hw_mode(struct insn_emulation *insn, bool enable)
  *  0		- If all the hooks ran successfully.
  * -EINVAL	- At least one hook is not supported by the CPU.
  */
-static int run_all_insn_set_hw_mode(unsigned long cpu)
+static int run_all_insn_set_hw_mode(unsigned int cpu)
 {
 	int rc = 0;
 	unsigned long flags;
@@ -131,7 +131,7 @@ static int run_all_insn_set_hw_mode(unsigned long cpu)
 	list_for_each_entry(insn, &insn_emulation, node) {
 		bool enable = (insn->current_mode == INSN_HW);
 		if (insn->ops->set_hw_mode && insn->ops->set_hw_mode(enable)) {
-			pr_warn("CPU[%ld] cannot support the emulation of %s",
+			pr_warn("CPU[%u] cannot support the emulation of %s",
 				cpu, insn->ops->name);
 			rc = -EINVAL;
 		}
@@ -611,20 +611,6 @@ static struct insn_emulation_ops setend_ops = {
 	.set_hw_mode = setend_set_hw_mode,
 };
 
-static int insn_cpu_hotplug_notify(struct notifier_block *b,
-				   unsigned long action, void *hcpu)
-{
-	int rc = 0;
-	if ((action & ~CPU_TASKS_FROZEN) == CPU_STARTING)
-		rc = run_all_insn_set_hw_mode((unsigned long)hcpu);
-
-	return notifier_from_errno(rc);
-}
-
-static struct notifier_block insn_cpu_hotplug_notifier = {
-	.notifier_call = insn_cpu_hotplug_notify,
-};
-
 /*
  * Invoked as late_initcall, since not needed before init spawned.
  */
@@ -643,7 +629,9 @@ static int __init armv8_deprecated_init(void)
 			pr_info("setend instruction emulation is not supported on the system");
 	}
 
-	register_cpu_notifier(&insn_cpu_hotplug_notifier);
+	cpuhp_setup_state_nocalls(CPUHP_AP_ARM64_ISNDEP_STARTING,
+				  "AP_ARM64_ISNDEP_STARTING",
+				  run_all_insn_set_hw_mode, NULL);
 	register_insn_emulation_sysctl(ctl_abi);
 
 	return 0;
@@ -453,29 +453,13 @@ static struct pmu pmu = {
 	.read        = bfin_pmu_read,
 };
 
-static void bfin_pmu_setup(int cpu)
+static int bfin_pmu_prepare_cpu(unsigned int cpu)
 {
 	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
 
+	bfin_write_PFCTL(0);
 	memset(cpuhw, 0, sizeof(struct cpu_hw_events));
-}
-
-static int
-bfin_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
-{
-	unsigned int cpu = (long)hcpu;
-
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_UP_PREPARE:
-		bfin_write_PFCTL(0);
-		bfin_pmu_setup(cpu);
-		break;
-	default:
-		break;
-	}
-
-	return NOTIFY_OK;
+	return 0;
 }
 
 static int __init bfin_pmu_init(void)
@@ -491,8 +475,8 @@ static int __init bfin_pmu_init(void)
 	ret = perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
 	if (!ret)
-		perf_cpu_notifier(bfin_pmu_notifier);
-
+		cpuhp_setup_state(CPUHP_PERF_BFIN, "PERF_BFIN",
+				  bfin_pmu_prepare_cpu, NULL);
 	return ret;
 }
 early_initcall(bfin_pmu_init);
@@ -806,25 +806,16 @@ static struct metag_pmu _metag_pmu = {
 };
 
 /* PMU CPU hotplug notifier */
-static int metag_pmu_cpu_notify(struct notifier_block *b, unsigned long action,
-		void *hcpu)
+static int metag_pmu_starting_cpu(unsigned int cpu)
 {
-	unsigned int cpu = (unsigned int)hcpu;
 	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
 
-	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
-		return NOTIFY_DONE;
-
 	memset(cpuc, 0, sizeof(struct cpu_hw_events));
 	raw_spin_lock_init(&cpuc->pmu_lock);
 
-	return NOTIFY_OK;
+	return 0;
 }
 
-static struct notifier_block metag_pmu_notifier = {
-	.notifier_call = metag_pmu_cpu_notify,
-};
-
 /* PMU Initialisation */
 static int __init init_hw_perf_events(void)
 {
@@ -876,16 +867,13 @@
 	metag_out32(0, PERF_COUNT(0));
 	metag_out32(0, PERF_COUNT(1));
 
-	for_each_possible_cpu(cpu) {
-		struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
-
-		memset(cpuc, 0, sizeof(struct cpu_hw_events));
-		raw_spin_lock_init(&cpuc->pmu_lock);
-	}
-
-	register_cpu_notifier(&metag_pmu_notifier);
+	cpuhp_setup_state(CPUHP_AP_PERF_METAG_STARTING,
+			  "AP_PERF_METAG_STARTING", metag_pmu_starting_cpu,
+			  NULL);
 
 	ret = perf_pmu_register(&pmu, metag_pmu->name, PERF_TYPE_RAW);
 out:
+	if (ret)
+		cpuhp_remove_state_nocalls(CPUHP_AP_PERF_METAG_STARTING);
 	return ret;
 }
 early_initcall(init_hw_perf_events);
@@ -168,33 +168,26 @@ static int loongson3_perfcount_handler(void)
 	return handled;
 }
 
-static int loongson3_cpu_callback(struct notifier_block *nfb,
-	unsigned long action, void *hcpu)
+static int loongson3_starting_cpu(unsigned int cpu)
 {
-	switch (action) {
-	case CPU_STARTING:
-	case CPU_STARTING_FROZEN:
-		write_c0_perflo1(reg.control1);
-		write_c0_perflo2(reg.control2);
-		break;
-	case CPU_DYING:
-	case CPU_DYING_FROZEN:
-		write_c0_perflo1(0xc0000000);
-		write_c0_perflo2(0x40000000);
-		break;
-	}
-
-	return NOTIFY_OK;
+	write_c0_perflo1(reg.control1);
+	write_c0_perflo2(reg.control2);
+	return 0;
 }
 
-static struct notifier_block loongson3_notifier_block = {
-	.notifier_call = loongson3_cpu_callback
-};
+static int loongson3_dying_cpu(unsigned int cpu)
+{
+	write_c0_perflo1(0xc0000000);
+	write_c0_perflo2(0x40000000);
+	return 0;
+}
 
 static int __init loongson3_init(void)
 {
 	on_each_cpu(reset_counters, NULL, 1);
-	register_hotcpu_notifier(&loongson3_notifier_block);
+	cpuhp_setup_state_nocalls(CPUHP_AP_MIPS_OP_LOONGSON3_STARTING,
+				  "AP_MIPS_OP_LOONGSON3_STARTING",
+				  loongson3_starting_cpu, loongson3_dying_cpu);
 	save_perf_irq = perf_irq;
 	perf_irq = loongson3_perfcount_handler;
@@ -204,7 +197,7 @@ static int __init loongson3_init(void)
 static void loongson3_exit(void)
 {
 	on_each_cpu(reset_counters, NULL, 1);
-	unregister_hotcpu_notifier(&loongson3_notifier_block);
+	cpuhp_remove_state_nocalls(CPUHP_AP_MIPS_OP_LOONGSON3_STARTING);
 	perf_irq = save_perf_irq;
 }
@@ -581,30 +581,22 @@ static void verify_cpu_node_mapping(int cpu, int node)
 	}
 }
 
-static int cpu_numa_callback(struct notifier_block *nfb, unsigned long action,
-			     void *hcpu)
+/* Must run before sched domains notifier. */
+static int ppc_numa_cpu_prepare(unsigned int cpu)
 {
-	unsigned long lcpu = (unsigned long)hcpu;
-	int ret = NOTIFY_DONE, nid;
+	int nid;
 
-	switch (action) {
-	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
-		nid = numa_setup_cpu(lcpu);
-		verify_cpu_node_mapping((int)lcpu, nid);
-		ret = NOTIFY_OK;
-		break;
+	nid = numa_setup_cpu(cpu);
+	verify_cpu_node_mapping(cpu, nid);
+	return 0;
+}
+
+static int ppc_numa_cpu_dead(unsigned int cpu)
+{
 #ifdef CONFIG_HOTPLUG_CPU
-	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
-	case CPU_UP_CANCELED:
-	case CPU_UP_CANCELED_FROZEN:
-		unmap_cpu_from_node(lcpu);
-		ret = NOTIFY_OK;
-		break;
+	unmap_cpu_from_node(cpu);
 #endif
-	}
-	return ret;
+	return 0;
 }
 
@@ -913,11 +905,6 @@ static void __init dump_numa_memory_topology(void)
 	}
 }
 
-static struct notifier_block ppc64_numa_nb = {
-	.notifier_call = cpu_numa_callback,
-	.priority = 1 /* Must run before sched domains notifier. */
-};
-
 /* Initialize NODE_DATA for a node on the local memory */
 static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
 {
@@ -985,15 +972,18 @@
 	setup_node_to_cpumask_map();
 
 	reset_numa_cpu_lookup_table();
-	register_cpu_notifier(&ppc64_numa_nb);
+
 	/*
 	 * We need the numa_cpu_lookup_table to be accurate for all CPUs,
 	 * even before we online them, so that we can use cpu_to_{node,mem}
 	 * early in boot, cf. smp_prepare_cpus().
+	 * _nocalls() + manual invocation is used because cpuhp is not yet
+	 * initialized for the boot CPU.
 	 */
-	for_each_present_cpu(cpu) {
-		numa_setup_cpu((unsigned long)cpu);
-	}
+	cpuhp_setup_state_nocalls(CPUHP_POWER_NUMA_PREPARE, "POWER_NUMA_PREPARE",
+				  ppc_numa_cpu_prepare, ppc_numa_cpu_dead);
+	for_each_present_cpu(cpu)
+		numa_setup_cpu(cpu);
 }
 
 static int __init early_numa(char *p)
@@ -2158,31 +2158,15 @@ static void perf_event_interrupt(struct pt_regs *regs)
 		irq_exit();
 }
 
-static void power_pmu_setup(int cpu)
+int power_pmu_prepare_cpu(unsigned int cpu)
 {
 	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
 
-	if (!ppmu)
-		return;
-	memset(cpuhw, 0, sizeof(*cpuhw));
-	cpuhw->mmcr[0] = MMCR0_FC;
-}
-
-static int
-power_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
-{
-	unsigned int cpu = (long)hcpu;
-
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_UP_PREPARE:
-		power_pmu_setup(cpu);
-		break;
-
-	default:
-		break;
+	if (ppmu) {
+		memset(cpuhw, 0, sizeof(*cpuhw));
+		cpuhw->mmcr[0] = MMCR0_FC;
 	}
-
-	return NOTIFY_OK;
+	return 0;
 }
 
 int register_power_pmu(struct power_pmu *pmu)
@@ -2205,7 +2189,7 @@ int register_power_pmu(struct power_pmu *pmu)
 #endif /* CONFIG_PPC64 */
 
 	perf_pmu_register(&power_pmu, "cpu", PERF_TYPE_RAW);
-	perf_cpu_notifier(power_pmu_notifier);
-
+	cpuhp_setup_state(CPUHP_PERF_POWER, "PERF_POWER",
+			  power_pmu_prepare_cpu, NULL);
 	return 0;
 }
@@ -664,30 +664,22 @@
 	.cancel_txn   = cpumf_pmu_cancel_txn,
 };
 
-static int cpumf_pmu_notifier(struct notifier_block *self, unsigned long action,
-			      void *hcpu)
+static int cpumf_pmf_setup(unsigned int cpu, int flags)
 {
-	int flags;
-
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_ONLINE:
-	case CPU_DOWN_FAILED:
-		flags = PMC_INIT;
-		local_irq_disable();
-		setup_pmc_cpu(&flags);
-		local_irq_enable();
-		break;
-	case CPU_DOWN_PREPARE:
-		flags = PMC_RELEASE;
-		local_irq_disable();
-		setup_pmc_cpu(&flags);
-		local_irq_enable();
-		break;
-	default:
-		break;
-	}
-
-	return NOTIFY_OK;
+	local_irq_disable();
+	setup_pmc_cpu(&flags);
+	local_irq_enable();
+	return 0;
+}
+
+static int s390_pmu_online_cpu(unsigned int cpu)
+{
+	return cpumf_pmf_setup(cpu, PMC_INIT);
 }