Merge branch 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull smp hotplug updates from Thomas Gleixner:
 "This is the next part of the hotplug rework.

   - Convert all notifiers with a priority assigned

   - Convert all CPU_STARTING/DYING notifiers

     The final removal of the STARTING/DYING infrastructure will happen
     when the merge window closes.

  Another 700 lines of impenetrable maze gone :)"

* 'smp-hotplug-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (70 commits)
  timers/core: Correct callback order during CPU hot plug
  leds/trigger/cpu: Move from CPU_STARTING to ONLINE level
  powerpc/numa: Convert to hotplug state machine
  arm/perf: Fix hotplug state machine conversion
  irqchip/armada: Avoid unused function warnings
  ARC/time: Convert to hotplug state machine
  clocksource/atlas7: Convert to hotplug state machine
  clocksource/armada-370-xp: Convert to hotplug state machine
  clocksource/exynos_mct: Convert to hotplug state machine
  clocksource/arm_global_timer: Convert to hotplug state machine
  rcu: Convert rcutree to hotplug state machine
  KVM/arm/arm64/vgic-new: Convert to hotplug state machine
  smp/cfd: Convert core to hotplug state machine
  x86/x2apic: Convert to CPU hotplug state machine
  profile: Convert to hotplug state machine
  timers/core: Convert to hotplug state machine
  hrtimer: Convert to hotplug state machine
  x86/tboot: Convert to hotplug state machine
  arm64/armv8 deprecated: Convert to hotplug state machine
  hwtracing/coresight-etm4x: Convert to hotplug state machine
  ...
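
All of the conversions in this series follow the same basic shape: the old multiplexed CPU_STARTING/CPU_DYING notifier is split into a pair of plain per-CPU callbacks and registered through cpuhp_setup_state(). The sketch below illustrates that pattern only; the driver name, the CPUHP_AP_FOO_STARTING state constant and the foo_enable()/foo_disable() helpers are placeholders, not taken from any particular commit in this pull.

	/* Old style: one notifier multiplexing on the action code */
	static int foo_cpu_notify(struct notifier_block *self,
				  unsigned long action, void *hcpu)
	{
		switch (action & ~CPU_TASKS_FROZEN) {
		case CPU_STARTING:
			foo_enable();		/* runs on the incoming CPU */
			break;
		case CPU_DYING:
			foo_disable();		/* runs on the outgoing CPU */
			break;
		}
		return NOTIFY_OK;
	}

	/* New style: two symmetric callbacks keyed off a cpuhp state */
	static int foo_starting_cpu(unsigned int cpu)
	{
		foo_enable();
		return 0;			/* negative errno on failure */
	}

	static int foo_dying_cpu(unsigned int cpu)
	{
		foo_disable();
		return 0;
	}

	/*
	 * Registration: the core invokes the startup callback on all
	 * already-online CPUs (use the _nocalls() variant to skip that).
	 */
	ret = cpuhp_setup_state(CPUHP_AP_FOO_STARTING, "AP_FOO_STARTING",
				foo_starting_cpu, foo_dying_cpu);

The ordering that was previously expressed via notifier priorities is now encoded in the position of the state constant in the cpuhp_state enum, which is what allows the STARTING/DYING notifier infrastructure to be removed once all users are converted.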
diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c
index 98f22d2..f927b8d 100644
--- a/arch/arc/kernel/time.c
+++ b/arch/arc/kernel/time.c
@@ -296,30 +296,23 @@
 	return IRQ_HANDLED;
 }
 
-static int arc_timer_cpu_notify(struct notifier_block *self,
-				unsigned long action, void *hcpu)
+
+static int arc_timer_starting_cpu(unsigned int cpu)
 {
 	struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
 
 	evt->cpumask = cpumask_of(smp_processor_id());
 
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_STARTING:
-		clockevents_config_and_register(evt, arc_timer_freq,
-						0, ULONG_MAX);
-		enable_percpu_irq(arc_timer_irq, 0);
-		break;
-	case CPU_DYING:
-		disable_percpu_irq(arc_timer_irq);
-		break;
-	}
-
-	return NOTIFY_OK;
+	clockevents_config_and_register(evt, arc_timer_freq, 0, ARC_TIMER_MAX);
+	enable_percpu_irq(arc_timer_irq, 0);
+	return 0;
 }
 
-static struct notifier_block arc_timer_cpu_nb = {
-	.notifier_call = arc_timer_cpu_notify,
-};
+static int arc_timer_dying_cpu(unsigned int cpu)
+{
+	disable_percpu_irq(arc_timer_irq);
+	return 0;
+}
 
 /*
  * clockevent setup for boot CPU
@@ -329,12 +322,6 @@
 	struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
 	int ret;
 
-	ret = register_cpu_notifier(&arc_timer_cpu_nb);
-	if (ret) {
-		pr_err("Failed to register cpu notifier");
-		return ret;
-	}
-
 	arc_timer_irq = irq_of_parse_and_map(node, 0);
 	if (arc_timer_irq <= 0) {
 		pr_err("clockevent: missing irq");
@@ -347,11 +334,6 @@
 		return ret;
 	}
 
-	evt->irq = arc_timer_irq;
-	evt->cpumask = cpumask_of(smp_processor_id());
-	clockevents_config_and_register(evt, arc_timer_freq,
-					0, ARC_TIMER_MAX);
-
 	/* Needs apriori irq_set_percpu_devid() done in intc map function */
 	ret = request_percpu_irq(arc_timer_irq, timer_irq_handler,
 				 "Timer0 (per-cpu-tick)", evt);
@@ -360,8 +342,14 @@
 		return ret;
 	}
 
-	enable_percpu_irq(arc_timer_irq, 0);
-
+	ret = cpuhp_setup_state(CPUHP_AP_ARC_TIMER_STARTING,
+				"AP_ARC_TIMER_STARTING",
+				arc_timer_starting_cpu,
+				arc_timer_dying_cpu);
+	if (ret) {
+		pr_err("Failed to setup hotplug state");
+		return ret;
+	}
 	return 0;
 }
 
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index b6ec65e..02d5e5e 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -310,24 +310,17 @@
 	enable_percpu_irq(clk->irq, 0);
 }
 
-static int twd_timer_cpu_notify(struct notifier_block *self,
-				unsigned long action, void *hcpu)
+static int twd_timer_starting_cpu(unsigned int cpu)
 {
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_STARTING:
-		twd_timer_setup();
-		break;
-	case CPU_DYING:
-		twd_timer_stop();
-		break;
-	}
-
-	return NOTIFY_OK;
+	twd_timer_setup();
+	return 0;
 }
 
-static struct notifier_block twd_timer_cpu_nb = {
-	.notifier_call = twd_timer_cpu_notify,
-};
+static int twd_timer_dying_cpu(unsigned int cpu)
+{
+	twd_timer_stop();
+	return 0;
+}
 
 static int __init twd_local_timer_common_register(struct device_node *np)
 {
@@ -345,9 +338,9 @@
 		goto out_free;
 	}
 
-	err = register_cpu_notifier(&twd_timer_cpu_nb);
-	if (err)
-		goto out_irq;
+	cpuhp_setup_state_nocalls(CPUHP_AP_ARM_TWD_STARTING,
+				  "AP_ARM_TWD_STARTING",
+				  twd_timer_starting_cpu, twd_timer_dying_cpu);
 
 	twd_get_clock(np);
 	if (!of_property_read_bool(np, "always-on"))
@@ -365,8 +358,6 @@
 
 	return 0;
 
-out_irq:
-	free_percpu_irq(twd_ppi, twd_evt);
 out_free:
 	iounmap(twd_base);
 	twd_base = NULL;
diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
index e80f0dd..ae2a018 100644
--- a/arch/arm/mach-mvebu/coherency.c
+++ b/arch/arm/mach-mvebu/coherency.c
@@ -111,20 +111,12 @@
 	.notifier_call = mvebu_hwcc_notifier,
 };
 
-static int armada_xp_clear_shared_l2_notifier_func(struct notifier_block *nfb,
-					unsigned long action, void *hcpu)
+static int armada_xp_clear_l2_starting(unsigned int cpu)
 {
-	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
-		armada_xp_clear_shared_l2();
-
-	return NOTIFY_OK;
+	armada_xp_clear_shared_l2();
+	return 0;
 }
 
-static struct notifier_block armada_xp_clear_shared_l2_notifier = {
-	.notifier_call = armada_xp_clear_shared_l2_notifier_func,
-	.priority = 100,
-};
-
 static void __init armada_370_coherency_init(struct device_node *np)
 {
 	struct resource res;
@@ -155,8 +147,9 @@
 
 	of_node_put(cpu_config_np);
 
-	register_cpu_notifier(&armada_xp_clear_shared_l2_notifier);
-
+	cpuhp_setup_state_nocalls(CPUHP_AP_ARM_MVEBU_COHERENCY,
+				  "AP_ARM_MVEBU_COHERENCY",
+				  armada_xp_clear_l2_starting, NULL);
 exit:
 	set_cpu_coherent();
 }
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index c61996c..cc12905 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -597,17 +597,16 @@
 			      L310_POWER_CTRL);
 }
 
-static int l2c310_cpu_enable_flz(struct notifier_block *nb, unsigned long act, void *data)
+static int l2c310_starting_cpu(unsigned int cpu)
 {
-	switch (act & ~CPU_TASKS_FROZEN) {
-	case CPU_STARTING:
-		set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
-		break;
-	case CPU_DYING:
-		set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1)));
-		break;
-	}
-	return NOTIFY_OK;
+	set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
+	return 0;
+}
+
+static int l2c310_dying_cpu(unsigned int cpu)
+{
+	set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1)));
+	return 0;
 }
 
 static void __init l2c310_enable(void __iomem *base, unsigned num_lock)
@@ -678,10 +677,10 @@
 			power_ctrl & L310_STNDBY_MODE_EN ? "en" : "dis");
 	}
 
-	if (aux & L310_AUX_CTRL_FULL_LINE_ZERO) {
-		set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
-		cpu_notifier(l2c310_cpu_enable_flz, 0);
-	}
+	if (aux & L310_AUX_CTRL_FULL_LINE_ZERO)
+		cpuhp_setup_state(CPUHP_AP_ARM_L2X0_STARTING,
+				  "AP_ARM_L2X0_STARTING", l2c310_starting_cpu,
+				  l2c310_dying_cpu);
 }
 
 static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 73085d3..da0b33d 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -643,19 +643,19 @@
  * hardware state at every thread switch.  We clear our held state when
  * a CPU has been killed, indicating that the VFP hardware doesn't contain
  * a threads VFP state.  When a CPU starts up, we re-enable access to the
- * VFP hardware.
- *
- * Both CPU_DYING and CPU_STARTING are called on the CPU which
+ * VFP hardware. The callbacks below are called on the CPU which
  * is being offlined/onlined.
  */
-static int vfp_hotplug(struct notifier_block *b, unsigned long action,
-	void *hcpu)
+static int vfp_dying_cpu(unsigned int cpu)
 {
-	if (action == CPU_DYING || action == CPU_DYING_FROZEN)
-		vfp_current_hw_state[(long)hcpu] = NULL;
-	else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
-		vfp_enable(NULL);
-	return NOTIFY_OK;
+	vfp_force_reload(cpu, current_thread_info());
+	return 0;
+}
+
+static int vfp_starting_cpu(unsigned int unused)
+{
+	vfp_enable(NULL);
+	return 0;
 }
 
 void vfp_kmode_exception(void)
@@ -732,6 +732,10 @@
 	unsigned int vfpsid;
 	unsigned int cpu_arch = cpu_architecture();
 
+	/*
+	 * Enable the access to the VFP on all online CPUs so the
+	 * following test on FPSID will succeed.
+	 */
 	if (cpu_arch >= CPU_ARCH_ARMv6)
 		on_each_cpu(vfp_enable, NULL, 1);
 
@@ -794,7 +798,9 @@
 		VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;
 	}
 
-	hotcpu_notifier(vfp_hotplug, 0);
+	cpuhp_setup_state_nocalls(CPUHP_AP_ARM_VFP_STARTING,
+				  "AP_ARM_VFP_STARTING", vfp_starting_cpu,
+				  vfp_dying_cpu);
 
 	vfp_vector = vfp_support_entry;
 
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 0bea3d2..b0b82f5 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -153,12 +153,11 @@
 	.notifier_call = xen_pvclock_gtod_notify,
 };
 
-static void xen_percpu_init(void)
+static int xen_starting_cpu(unsigned int cpu)
 {
 	struct vcpu_register_vcpu_info info;
 	struct vcpu_info *vcpup;
 	int err;
-	int cpu = get_cpu();
 
 	/* 
 	 * VCPUOP_register_vcpu_info cannot be called twice for the same
@@ -186,7 +185,13 @@
 
 after_register_vcpu_info:
 	enable_percpu_irq(xen_events_irq, 0);
-	put_cpu();
+	return 0;
+}
+
+static int xen_dying_cpu(unsigned int cpu)
+{
+	disable_percpu_irq(xen_events_irq);
+	return 0;
 }
 
 static void xen_restart(enum reboot_mode reboot_mode, const char *cmd)
@@ -205,28 +210,6 @@
 	BUG_ON(rc);
 }
 
-static int xen_cpu_notification(struct notifier_block *self,
-				unsigned long action,
-				void *hcpu)
-{
-	switch (action) {
-	case CPU_STARTING:
-		xen_percpu_init();
-		break;
-	case CPU_DYING:
-		disable_percpu_irq(xen_events_irq);
-		break;
-	default:
-		break;
-	}
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block xen_cpu_notifier = {
-	.notifier_call = xen_cpu_notification,
-};
-
 static irqreturn_t xen_arm_callback(int irq, void *arg)
 {
 	xen_hvm_evtchn_do_upcall();
@@ -425,16 +408,14 @@
 		return -EINVAL;
 	}
 
-	xen_percpu_init();
-
-	register_cpu_notifier(&xen_cpu_notifier);
-
 	xen_time_setup_guest();
 
 	if (xen_initial_domain())
 		pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier);
 
-	return 0;
+	return cpuhp_setup_state(CPUHP_AP_ARM_XEN_STARTING,
+				 "AP_ARM_XEN_STARTING", xen_starting_cpu,
+				 xen_dying_cpu);
 }
 early_initcall(xen_guest_init);
 
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index 5f72475..42ffdb5 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -121,7 +121,7 @@
  *  0 		- If all the hooks ran successfully.
  * -EINVAL	- At least one hook is not supported by the CPU.
  */
-static int run_all_insn_set_hw_mode(unsigned long cpu)
+static int run_all_insn_set_hw_mode(unsigned int cpu)
 {
 	int rc = 0;
 	unsigned long flags;
@@ -131,7 +131,7 @@
 	list_for_each_entry(insn, &insn_emulation, node) {
 		bool enable = (insn->current_mode == INSN_HW);
 		if (insn->ops->set_hw_mode && insn->ops->set_hw_mode(enable)) {
-			pr_warn("CPU[%ld] cannot support the emulation of %s",
+			pr_warn("CPU[%u] cannot support the emulation of %s",
 				cpu, insn->ops->name);
 			rc = -EINVAL;
 		}
@@ -611,20 +611,6 @@
 	.set_hw_mode = setend_set_hw_mode,
 };
 
-static int insn_cpu_hotplug_notify(struct notifier_block *b,
-			      unsigned long action, void *hcpu)
-{
-	int rc = 0;
-	if ((action & ~CPU_TASKS_FROZEN) == CPU_STARTING)
-		rc = run_all_insn_set_hw_mode((unsigned long)hcpu);
-
-	return notifier_from_errno(rc);
-}
-
-static struct notifier_block insn_cpu_hotplug_notifier = {
-	.notifier_call = insn_cpu_hotplug_notify,
-};
-
 /*
  * Invoked as late_initcall, since not needed before init spawned.
  */
@@ -643,7 +629,9 @@
 			pr_info("setend instruction emulation is not supported on the system");
 	}
 
-	register_cpu_notifier(&insn_cpu_hotplug_notifier);
+	cpuhp_setup_state_nocalls(CPUHP_AP_ARM64_ISNDEP_STARTING,
+				  "AP_ARM64_ISNDEP_STARTING",
+				  run_all_insn_set_hw_mode, NULL);
 	register_insn_emulation_sysctl(ctl_abi);
 
 	return 0;
diff --git a/arch/blackfin/kernel/perf_event.c b/arch/blackfin/kernel/perf_event.c
index 170d786..6355e97 100644
--- a/arch/blackfin/kernel/perf_event.c
+++ b/arch/blackfin/kernel/perf_event.c
@@ -453,29 +453,13 @@
 	.read        = bfin_pmu_read,
 };
 
-static void bfin_pmu_setup(int cpu)
+static int bfin_pmu_prepare_cpu(unsigned int cpu)
 {
 	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
 
+	bfin_write_PFCTL(0);
 	memset(cpuhw, 0, sizeof(struct cpu_hw_events));
-}
-
-static int
-bfin_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
-{
-	unsigned int cpu = (long)hcpu;
-
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_UP_PREPARE:
-		bfin_write_PFCTL(0);
-		bfin_pmu_setup(cpu);
-		break;
-
-	default:
-		break;
-	}
-
-	return NOTIFY_OK;
+	return 0;
 }
 
 static int __init bfin_pmu_init(void)
@@ -491,8 +475,8 @@
 
 	ret = perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
 	if (!ret)
-		perf_cpu_notifier(bfin_pmu_notifier);
-
+		cpuhp_setup_state(CPUHP_PERF_BFIN, "PERF_BFIN",
+				  bfin_pmu_prepare_cpu, NULL);
 	return ret;
 }
 early_initcall(bfin_pmu_init);
diff --git a/arch/metag/kernel/perf/perf_event.c b/arch/metag/kernel/perf/perf_event.c
index 33a365f..052cba2 100644
--- a/arch/metag/kernel/perf/perf_event.c
+++ b/arch/metag/kernel/perf/perf_event.c
@@ -806,25 +806,16 @@
 };
 
 /* PMU CPU hotplug notifier */
-static int metag_pmu_cpu_notify(struct notifier_block *b, unsigned long action,
-				void *hcpu)
+static int metag_pmu_starting_cpu(unsigned int cpu)
 {
-	unsigned int cpu = (unsigned int)hcpu;
 	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
 
-	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
-		return NOTIFY_DONE;
-
 	memset(cpuc, 0, sizeof(struct cpu_hw_events));
 	raw_spin_lock_init(&cpuc->pmu_lock);
 
-	return NOTIFY_OK;
+	return 0;
 }
 
-static struct notifier_block metag_pmu_notifier = {
-	.notifier_call = metag_pmu_cpu_notify,
-};
-
 /* PMU Initialisation */
 static int __init init_hw_perf_events(void)
 {
@@ -876,16 +867,13 @@
 	metag_out32(0, PERF_COUNT(0));
 	metag_out32(0, PERF_COUNT(1));
 
-	for_each_possible_cpu(cpu) {
-		struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+	cpuhp_setup_state(CPUHP_AP_PERF_METAG_STARTING,
+			  "AP_PERF_METAG_STARTING", metag_pmu_starting_cpu,
+			  NULL);
 
-		memset(cpuc, 0, sizeof(struct cpu_hw_events));
-		raw_spin_lock_init(&cpuc->pmu_lock);
-	}
-
-	register_cpu_notifier(&metag_pmu_notifier);
 	ret = perf_pmu_register(&pmu, metag_pmu->name, PERF_TYPE_RAW);
-out:
+	if (ret)
+		cpuhp_remove_state_nocalls(CPUHP_AP_PERF_METAG_STARTING);
 	return ret;
 }
 early_initcall(init_hw_perf_events);
diff --git a/arch/mips/oprofile/op_model_loongson3.c b/arch/mips/oprofile/op_model_loongson3.c
index 8bcf7fc..85f3ee4 100644
--- a/arch/mips/oprofile/op_model_loongson3.c
+++ b/arch/mips/oprofile/op_model_loongson3.c
@@ -168,33 +168,26 @@
 	return handled;
 }
 
-static int loongson3_cpu_callback(struct notifier_block *nfb,
-	unsigned long action, void *hcpu)
+static int loongson3_starting_cpu(unsigned int cpu)
 {
-	switch (action) {
-	case CPU_STARTING:
-	case CPU_STARTING_FROZEN:
-		write_c0_perflo1(reg.control1);
-		write_c0_perflo2(reg.control2);
-		break;
-	case CPU_DYING:
-	case CPU_DYING_FROZEN:
-		write_c0_perflo1(0xc0000000);
-		write_c0_perflo2(0x40000000);
-		break;
-	}
-
-	return NOTIFY_OK;
+	write_c0_perflo1(reg.control1);
+	write_c0_perflo2(reg.control2);
+	return 0;
 }
 
-static struct notifier_block loongson3_notifier_block = {
-	.notifier_call = loongson3_cpu_callback
-};
+static int loongson3_dying_cpu(unsigned int cpu)
+{
+	write_c0_perflo1(0xc0000000);
+	write_c0_perflo2(0x40000000);
+	return 0;
+}
 
 static int __init loongson3_init(void)
 {
 	on_each_cpu(reset_counters, NULL, 1);
-	register_hotcpu_notifier(&loongson3_notifier_block);
+	cpuhp_setup_state_nocalls(CPUHP_AP_MIPS_OP_LOONGSON3_STARTING,
+				  "AP_MIPS_OP_LOONGSON3_STARTING",
+				  loongson3_starting_cpu, loongson3_dying_cpu);
 	save_perf_irq = perf_irq;
 	perf_irq = loongson3_perfcount_handler;
 
@@ -204,7 +197,7 @@
 static void loongson3_exit(void)
 {
 	on_each_cpu(reset_counters, NULL, 1);
-	unregister_hotcpu_notifier(&loongson3_notifier_block);
+	cpuhp_remove_state_nocalls(CPUHP_AP_MIPS_OP_LOONGSON3_STARTING);
 	perf_irq = save_perf_irq;
 }
 
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 669a15e..6dc07dd 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -581,30 +581,22 @@
 	}
 }
 
-static int cpu_numa_callback(struct notifier_block *nfb, unsigned long action,
-			     void *hcpu)
+/* Must run before sched domains notifier. */
+static int ppc_numa_cpu_prepare(unsigned int cpu)
 {
-	unsigned long lcpu = (unsigned long)hcpu;
-	int ret = NOTIFY_DONE, nid;
+	int nid;
 
-	switch (action) {
-	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
-		nid = numa_setup_cpu(lcpu);
-		verify_cpu_node_mapping((int)lcpu, nid);
-		ret = NOTIFY_OK;
-		break;
+	nid = numa_setup_cpu(cpu);
+	verify_cpu_node_mapping(cpu, nid);
+	return 0;
+}
+
+static int ppc_numa_cpu_dead(unsigned int cpu)
+{
 #ifdef CONFIG_HOTPLUG_CPU
-	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
-	case CPU_UP_CANCELED:
-	case CPU_UP_CANCELED_FROZEN:
-		unmap_cpu_from_node(lcpu);
-		ret = NOTIFY_OK;
-		break;
+	unmap_cpu_from_node(cpu);
 #endif
-	}
-	return ret;
+	return 0;
 }
 
 /*
@@ -913,11 +905,6 @@
 	}
 }
 
-static struct notifier_block ppc64_numa_nb = {
-	.notifier_call = cpu_numa_callback,
-	.priority = 1 /* Must run before sched domains notifier. */
-};
-
 /* Initialize NODE_DATA for a node on the local memory */
 static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
 {
@@ -985,15 +972,18 @@
 	setup_node_to_cpumask_map();
 
 	reset_numa_cpu_lookup_table();
-	register_cpu_notifier(&ppc64_numa_nb);
+
 	/*
 	 * We need the numa_cpu_lookup_table to be accurate for all CPUs,
 	 * even before we online them, so that we can use cpu_to_{node,mem}
 	 * early in boot, cf. smp_prepare_cpus().
+	 * _nocalls() + manual invocation is used because cpuhp is not yet
+	 * initialized for the boot CPU.
 	 */
-	for_each_present_cpu(cpu) {
-		numa_setup_cpu((unsigned long)cpu);
-	}
+	cpuhp_setup_state_nocalls(CPUHP_POWER_NUMA_PREPARE, "POWER_NUMA_PREPARE",
+				  ppc_numa_cpu_prepare, ppc_numa_cpu_dead);
+	for_each_present_cpu(cpu)
+		numa_setup_cpu(cpu);
 }
 
 static int __init early_numa(char *p)
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 97a1d40..ffd61d5 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -2158,31 +2158,15 @@
 		irq_exit();
 }
 
-static void power_pmu_setup(int cpu)
+int power_pmu_prepare_cpu(unsigned int cpu)
 {
 	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
 
-	if (!ppmu)
-		return;
-	memset(cpuhw, 0, sizeof(*cpuhw));
-	cpuhw->mmcr[0] = MMCR0_FC;
-}
-
-static int
-power_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
-{
-	unsigned int cpu = (long)hcpu;
-
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_UP_PREPARE:
-		power_pmu_setup(cpu);
-		break;
-
-	default:
-		break;
+	if (ppmu) {
+		memset(cpuhw, 0, sizeof(*cpuhw));
+		cpuhw->mmcr[0] = MMCR0_FC;
 	}
-
-	return NOTIFY_OK;
+	return 0;
 }
 
 int register_power_pmu(struct power_pmu *pmu)
@@ -2205,7 +2189,7 @@
 #endif /* CONFIG_PPC64 */
 
 	perf_pmu_register(&power_pmu, "cpu", PERF_TYPE_RAW);
-	perf_cpu_notifier(power_pmu_notifier);
-
+	cpuhp_setup_state(CPUHP_PERF_POWER, "PERF_POWER",
+			  power_pmu_prepare_cpu, NULL);
 	return 0;
 }
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 7ec63b1..037c2a2 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -664,30 +664,22 @@
 	.cancel_txn   = cpumf_pmu_cancel_txn,
 };
 
-static int cpumf_pmu_notifier(struct notifier_block *self, unsigned long action,
-			      void *hcpu)
+static int cpumf_pmf_setup(unsigned int cpu, int flags)
 {
-	int flags;
+	local_irq_disable();
+	setup_pmc_cpu(&flags);
+	local_irq_enable();
+	return 0;
+}
 
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_ONLINE:
-	case CPU_DOWN_FAILED:
-		flags = PMC_INIT;
-		local_irq_disable();
-		setup_pmc_cpu(&flags);
-		local_irq_enable();
-		break;
-	case CPU_DOWN_PREPARE:
-		flags = PMC_RELEASE;
-		local_irq_disable();
-		setup_pmc_cpu(&flags);
-		local_irq_enable();
-		break;
-	default:
-		break;
-	}
+static int s390_pmu_online_cpu(unsigned int cpu)
+{
+	return cpumf_pmf_setup(cpu, PMC_INIT);
+}
 
-	return NOTIFY_OK;
+static int s390_pmu_offline_cpu(unsigned int cpu)
+{
+	return cpumf_pmf_setup(cpu, PMC_RELEASE);
 }
 
 static int __init cpumf_pmu_init(void)
@@ -707,7 +699,7 @@
 	if (rc) {
 		pr_err("Registering for CPU-measurement alerts "
 		       "failed with rc=%i\n", rc);
-		goto out;
+		return rc;
 	}
 
 	cpumf_pmu.attr_groups = cpumf_cf_event_group();
@@ -716,10 +708,10 @@
 		pr_err("Registering the cpum_cf PMU failed with rc=%i\n", rc);
 		unregister_external_irq(EXT_IRQ_MEASURE_ALERT,
 					cpumf_measurement_alert);
-		goto out;
+		return rc;
 	}
-	perf_cpu_notifier(cpumf_pmu_notifier);
-out:
-	return rc;
+	return cpuhp_setup_state(CPUHP_AP_PERF_S390_CF_ONLINE,
+				 "AP_PERF_S390_CF_ONLINE",
+				 s390_pmu_online_cpu, s390_pmu_offline_cpu);
 }
 early_initcall(cpumf_pmu_init);
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index 53acf2d..fcc634c 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -1504,37 +1504,28 @@
 		sf_disable();
 	}
 }
-
-static int cpumf_pmu_notifier(struct notifier_block *self,
-			      unsigned long action, void *hcpu)
+static int cpusf_pmu_setup(unsigned int cpu, int flags)
 {
-	int flags;
-
 	/* Ignore the notification if no events are scheduled on the PMU.
 	 * This might be racy...
 	 */
 	if (!atomic_read(&num_events))
-		return NOTIFY_OK;
+		return 0;
 
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_ONLINE:
-	case CPU_DOWN_FAILED:
-		flags = PMC_INIT;
-		local_irq_disable();
-		setup_pmc_cpu(&flags);
-		local_irq_enable();
-		break;
-	case CPU_DOWN_PREPARE:
-		flags = PMC_RELEASE;
-		local_irq_disable();
-		setup_pmc_cpu(&flags);
-		local_irq_enable();
-		break;
-	default:
-		break;
-	}
+	local_irq_disable();
+	setup_pmc_cpu(&flags);
+	local_irq_enable();
+	return 0;
+}
 
-	return NOTIFY_OK;
+static int s390_pmu_sf_online_cpu(unsigned int cpu)
+{
+	return cpusf_pmu_setup(cpu, PMC_INIT);
+}
+
+static int s390_pmu_sf_offline_cpu(unsigned int cpu)
+{
+	return cpusf_pmu_setup(cpu, PMC_RELEASE);
 }
 
 static int param_get_sfb_size(char *buffer, const struct kernel_param *kp)
@@ -1634,7 +1625,9 @@
 					cpumf_measurement_alert);
 		goto out;
 	}
-	perf_cpu_notifier(cpumf_pmu_notifier);
+
+	cpuhp_setup_state(CPUHP_AP_PERF_S390_SF_ONLINE, "AP_PERF_S390_SF_ONLINE",
+			  s390_pmu_sf_online_cpu, s390_pmu_sf_offline_cpu);
 out:
 	return err;
 }
diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c
index 4dca183..ba3269a 100644
--- a/arch/sh/kernel/perf_event.c
+++ b/arch/sh/kernel/perf_event.c
@@ -352,28 +352,12 @@
 	.read		= sh_pmu_read,
 };
 
-static void sh_pmu_setup(int cpu)
+static int sh_pmu_prepare_cpu(unsigned int cpu)
 {
 	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
 
 	memset(cpuhw, 0, sizeof(struct cpu_hw_events));
-}
-
-static int
-sh_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
-{
-	unsigned int cpu = (long)hcpu;
-
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_UP_PREPARE:
-		sh_pmu_setup(cpu);
-		break;
-
-	default:
-		break;
-	}
-
-	return NOTIFY_OK;
+	return 0;
 }
 
 int register_sh_pmu(struct sh_pmu *_pmu)
@@ -394,6 +378,7 @@
 	WARN_ON(_pmu->num_events > MAX_HWEVENTS);
 
 	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
-	perf_cpu_notifier(sh_pmu_notifier);
+	cpuhp_setup_state(CPUHP_PERF_SUPERH, "PERF_SUPERH", sh_pmu_prepare_cpu,
+			  NULL);
 	return 0;
 }
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index 3329844..f840766 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -331,15 +331,9 @@
 	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
 }
 
-static int
-vgetcpu_cpu_notifier(struct notifier_block *n, unsigned long action, void *arg)
+static int vgetcpu_online(unsigned int cpu)
 {
-	long cpu = (long)arg;
-
-	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
-		smp_call_function_single(cpu, vgetcpu_cpu_init, NULL, 1);
-
-	return NOTIFY_DONE;
+	return smp_call_function_single(cpu, vgetcpu_cpu_init, NULL, 1);
 }
 
 static int __init init_vdso(void)
@@ -350,15 +344,9 @@
 	init_vdso_image(&vdso_image_x32);
 #endif
 
-	cpu_notifier_register_begin();
-
-	on_each_cpu(vgetcpu_cpu_init, NULL, 1);
 	/* notifier priority > KVM */
-	__hotcpu_notifier(vgetcpu_cpu_notifier, 30);
-
-	cpu_notifier_register_done();
-
-	return 0;
+	return cpuhp_setup_state(CPUHP_AP_X86_VDSO_VMA_ONLINE,
+				 "AP_X86_VDSO_VMA_ONLINE", vgetcpu_online, NULL);
 }
 subsys_initcall(init_vdso);
 #endif /* CONFIG_X86_64 */
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index bd3e842..e07a22b 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -370,13 +370,13 @@
 	WARN_ON_ONCE(cpuc->amd_nb);
 
 	if (!x86_pmu.amd_nb_constraints)
-		return NOTIFY_OK;
+		return 0;
 
 	cpuc->amd_nb = amd_alloc_nb(cpu);
 	if (!cpuc->amd_nb)
-		return NOTIFY_BAD;
+		return -ENOMEM;
 
-	return NOTIFY_OK;
+	return 0;
 }
 
 static void amd_pmu_cpu_starting(int cpu)
diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
index 72dea2f..155ea53 100644
--- a/arch/x86/events/amd/ibs.c
+++ b/arch/x86/events/amd/ibs.c
@@ -725,13 +725,10 @@
 	return ret;
 }
 
-static __init int perf_event_ibs_init(void)
+static __init void perf_event_ibs_init(void)
 {
 	struct attribute **attr = ibs_op_format_attrs;
 
-	if (!ibs_caps)
-		return -ENODEV;	/* ibs not supported by the cpu */
-
 	perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");
 
 	if (ibs_caps & IBS_CAPS_OPCNT) {
@@ -742,13 +739,11 @@
 
 	register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs");
 	pr_info("perf: AMD IBS detected (0x%08x)\n", ibs_caps);
-
-	return 0;
 }
 
 #else /* defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) */
 
-static __init int perf_event_ibs_init(void) { return 0; }
+static __init void perf_event_ibs_init(void) { }
 
 #endif
 
@@ -925,7 +920,7 @@
 	return val & IBSCTL_LVT_OFFSET_MASK;
 }
 
-static void setup_APIC_ibs(void *dummy)
+static void setup_APIC_ibs(void)
 {
 	int offset;
 
@@ -940,7 +935,7 @@
 		smp_processor_id());
 }
 
-static void clear_APIC_ibs(void *dummy)
+static void clear_APIC_ibs(void)
 {
 	int offset;
 
@@ -949,18 +944,24 @@
 		setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
 }
 
+static int x86_pmu_amd_ibs_starting_cpu(unsigned int cpu)
+{
+	setup_APIC_ibs();
+	return 0;
+}
+
 #ifdef CONFIG_PM
 
 static int perf_ibs_suspend(void)
 {
-	clear_APIC_ibs(NULL);
+	clear_APIC_ibs();
 	return 0;
 }
 
 static void perf_ibs_resume(void)
 {
 	ibs_eilvt_setup();
-	setup_APIC_ibs(NULL);
+	setup_APIC_ibs();
 }
 
 static struct syscore_ops perf_ibs_syscore_ops = {
@@ -979,27 +980,15 @@
 
 #endif
 
-static int
-perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
+static int x86_pmu_amd_ibs_dying_cpu(unsigned int cpu)
 {
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_STARTING:
-		setup_APIC_ibs(NULL);
-		break;
-	case CPU_DYING:
-		clear_APIC_ibs(NULL);
-		break;
-	default:
-		break;
-	}
-
-	return NOTIFY_OK;
+	clear_APIC_ibs();
+	return 0;
 }
 
 static __init int amd_ibs_init(void)
 {
 	u32 caps;
-	int ret = -EINVAL;
 
 	caps = __get_ibs_caps();
 	if (!caps)
@@ -1008,22 +997,25 @@
 	ibs_eilvt_setup();
 
 	if (!ibs_eilvt_valid())
-		goto out;
+		return -EINVAL;
 
 	perf_ibs_pm_init();
-	cpu_notifier_register_begin();
+
 	ibs_caps = caps;
 	/* make ibs_caps visible to other cpus: */
 	smp_mb();
-	smp_call_function(setup_APIC_ibs, NULL, 1);
-	__perf_cpu_notifier(perf_ibs_cpu_notifier);
-	cpu_notifier_register_done();
+	/*
+	 * x86_pmu_amd_ibs_starting_cpu will be called from core on
+	 * all online cpus.
+	 */
+	cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
+			  "AP_PERF_X86_AMD_IBS_STARTING",
+			  x86_pmu_amd_ibs_starting_cpu,
+			  x86_pmu_amd_ibs_dying_cpu);
 
-	ret = perf_event_ibs_init();
-out:
-	if (ret)
-		pr_err("Failed to setup IBS, %d\n", ret);
-	return ret;
+	perf_event_ibs_init();
+
+	return 0;
 }
 
 /* Since we need the pci subsystem to init ibs we can't do this earlier: */
diff --git a/arch/x86/events/amd/power.c b/arch/x86/events/amd/power.c
index 55a3529..9842270 100644
--- a/arch/x86/events/amd/power.c
+++ b/arch/x86/events/amd/power.c
@@ -228,12 +228,12 @@
 	.read		= pmu_event_read,
 };
 
-static void power_cpu_exit(int cpu)
+static int power_cpu_exit(unsigned int cpu)
 {
 	int target;
 
 	if (!cpumask_test_and_clear_cpu(cpu, &cpu_mask))
-		return;
+		return 0;
 
 	/*
 	 * Find a new CPU on the same compute unit, if was set in cpumask
@@ -245,9 +245,10 @@
 		cpumask_set_cpu(target, &cpu_mask);
 		perf_pmu_migrate_context(&pmu_class, cpu, target);
 	}
+	return 0;
 }
 
-static void power_cpu_init(int cpu)
+static int power_cpu_init(unsigned int cpu)
 {
 	int target;
 
@@ -255,7 +256,7 @@
 	 * 1) If any CPU is set at cpu_mask in the same compute unit, do
 	 * nothing.
 	 * 2) If no CPU is set at cpu_mask in the same compute unit,
-	 * set current STARTING CPU.
+	 * set current ONLINE CPU.
 	 *
 	 * Note: if there is a CPU aside of the new one already in the
 	 * sibling mask, then it is also in cpu_mask.
@@ -263,33 +264,9 @@
 	target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
 	if (target >= nr_cpumask_bits)
 		cpumask_set_cpu(cpu, &cpu_mask);
+	return 0;
 }
 
-static int
-power_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
-{
-	unsigned int cpu = (long)hcpu;
-
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_DOWN_FAILED:
-	case CPU_STARTING:
-		power_cpu_init(cpu);
-		break;
-	case CPU_DOWN_PREPARE:
-		power_cpu_exit(cpu);
-		break;
-	default:
-		break;
-	}
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block power_cpu_notifier_nb = {
-	.notifier_call = power_cpu_notifier,
-	.priority = CPU_PRI_PERF,
-};
-
 static const struct x86_cpu_id cpu_match[] = {
 	{ .vendor = X86_VENDOR_AMD, .family = 0x15 },
 	{},
@@ -297,7 +274,7 @@
 
 static int __init amd_power_pmu_init(void)
 {
-	int cpu, target, ret;
+	int ret;
 
 	if (!x86_match_cpu(cpu_match))
 		return 0;
@@ -312,38 +289,25 @@
 		return -ENODEV;
 	}
 
-	cpu_notifier_register_begin();
 
-	/* Choose one online core of each compute unit. */
-	for_each_online_cpu(cpu) {
-		target = cpumask_first(topology_sibling_cpumask(cpu));
-		if (!cpumask_test_cpu(target, &cpu_mask))
-			cpumask_set_cpu(target, &cpu_mask);
-	}
+	cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_POWER_ONLINE,
+			  "AP_PERF_X86_AMD_POWER_ONLINE",
+			  power_cpu_init, power_cpu_exit);
 
 	ret = perf_pmu_register(&pmu_class, "power", -1);
 	if (WARN_ON(ret)) {
 		pr_warn("AMD Power PMU registration failed\n");
-		goto out;
+		return ret;
 	}
 
-	__register_cpu_notifier(&power_cpu_notifier_nb);
-
 	pr_info("AMD Power PMU detected\n");
-
-out:
-	cpu_notifier_register_done();
-
 	return ret;
 }
 module_init(amd_power_pmu_init);
 
 static void __exit amd_power_pmu_exit(void)
 {
-	cpu_notifier_register_begin();
-	__unregister_cpu_notifier(&power_cpu_notifier_nb);
-	cpu_notifier_register_done();
-
+	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_AMD_POWER_ONLINE);
 	perf_pmu_unregister(&pmu_class);
 }
 module_exit(amd_power_pmu_exit);
diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
index 98ac573..e6131d4 100644
--- a/arch/x86/events/amd/uncore.c
+++ b/arch/x86/events/amd/uncore.c
@@ -358,7 +358,7 @@
 	return this;
 }
 
-static void amd_uncore_cpu_starting(unsigned int cpu)
+static int amd_uncore_cpu_starting(unsigned int cpu)
 {
 	unsigned int eax, ebx, ecx, edx;
 	struct amd_uncore *uncore;
@@ -384,6 +384,8 @@
 		uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_l2);
 		*per_cpu_ptr(amd_uncore_l2, cpu) = uncore;
 	}
+
+	return 0;
 }
 
 static void uncore_online(unsigned int cpu,
@@ -398,13 +400,15 @@
 		cpumask_set_cpu(cpu, uncore->active_mask);
 }
 
-static void amd_uncore_cpu_online(unsigned int cpu)
+static int amd_uncore_cpu_online(unsigned int cpu)
 {
 	if (amd_uncore_nb)
 		uncore_online(cpu, amd_uncore_nb);
 
 	if (amd_uncore_l2)
 		uncore_online(cpu, amd_uncore_l2);
+
+	return 0;
 }
 
 static void uncore_down_prepare(unsigned int cpu,
@@ -433,13 +437,15 @@
 	}
 }
 
-static void amd_uncore_cpu_down_prepare(unsigned int cpu)
+static int amd_uncore_cpu_down_prepare(unsigned int cpu)
 {
 	if (amd_uncore_nb)
 		uncore_down_prepare(cpu, amd_uncore_nb);
 
 	if (amd_uncore_l2)
 		uncore_down_prepare(cpu, amd_uncore_l2);
+
+	return 0;
 }
 
 static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores)
@@ -454,74 +460,19 @@
 	*per_cpu_ptr(uncores, cpu) = NULL;
 }
 
-static void amd_uncore_cpu_dead(unsigned int cpu)
+static int amd_uncore_cpu_dead(unsigned int cpu)
 {
 	if (amd_uncore_nb)
 		uncore_dead(cpu, amd_uncore_nb);
 
 	if (amd_uncore_l2)
 		uncore_dead(cpu, amd_uncore_l2);
-}
 
-static int
-amd_uncore_cpu_notifier(struct notifier_block *self, unsigned long action,
-			void *hcpu)
-{
-	unsigned int cpu = (long)hcpu;
-
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_UP_PREPARE:
-		if (amd_uncore_cpu_up_prepare(cpu))
-			return notifier_from_errno(-ENOMEM);
-		break;
-
-	case CPU_STARTING:
-		amd_uncore_cpu_starting(cpu);
-		break;
-
-	case CPU_ONLINE:
-		amd_uncore_cpu_online(cpu);
-		break;
-
-	case CPU_DOWN_PREPARE:
-		amd_uncore_cpu_down_prepare(cpu);
-		break;
-
-	case CPU_UP_CANCELED:
-	case CPU_DEAD:
-		amd_uncore_cpu_dead(cpu);
-		break;
-
-	default:
-		break;
-	}
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block amd_uncore_cpu_notifier_block = {
-	.notifier_call	= amd_uncore_cpu_notifier,
-	.priority	= CPU_PRI_PERF + 1,
-};
-
-static void __init init_cpu_already_online(void *dummy)
-{
-	unsigned int cpu = smp_processor_id();
-
-	amd_uncore_cpu_starting(cpu);
-	amd_uncore_cpu_online(cpu);
-}
-
-static void cleanup_cpu_online(void *dummy)
-{
-	unsigned int cpu = smp_processor_id();
-
-	amd_uncore_cpu_dead(cpu);
+	return 0;
 }
 
 static int __init amd_uncore_init(void)
 {
-	unsigned int cpu, cpu2;
 	int ret = -ENODEV;
 
 	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
@@ -558,38 +509,29 @@
 		ret = 0;
 	}
 
-	if (ret)
-		goto fail_nodev;
+	/*
+	 * Install callbacks. Core will call them for each online cpu.
+	 */
+	if (cpuhp_setup_state(CPUHP_PERF_X86_AMD_UNCORE_PREP,
+			      "PERF_X86_AMD_UNCORE_PREP",
+			      amd_uncore_cpu_up_prepare, amd_uncore_cpu_dead))
+		goto fail_l2;
 
-	cpu_notifier_register_begin();
-
-	/* init cpus already online before registering for hotplug notifier */
-	for_each_online_cpu(cpu) {
-		ret = amd_uncore_cpu_up_prepare(cpu);
-		if (ret)
-			goto fail_online;
-		smp_call_function_single(cpu, init_cpu_already_online, NULL, 1);
-	}
-
-	__register_cpu_notifier(&amd_uncore_cpu_notifier_block);
-	cpu_notifier_register_done();
-
+	if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
+			      "AP_PERF_X86_AMD_UNCORE_STARTING",
+			      amd_uncore_cpu_starting, NULL))
+		goto fail_prep;
+	if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
+			      "AP_PERF_X86_AMD_UNCORE_ONLINE",
+			      amd_uncore_cpu_online,
+			      amd_uncore_cpu_down_prepare))
+		goto fail_start;
 	return 0;
 
-
-fail_online:
-	for_each_online_cpu(cpu2) {
-		if (cpu2 == cpu)
-			break;
-		smp_call_function_single(cpu, cleanup_cpu_online, NULL, 1);
-	}
-	cpu_notifier_register_done();
-
-	/* amd_uncore_nb/l2 should have been freed by cleanup_cpu_online */
-	amd_uncore_nb = amd_uncore_l2 = NULL;
-
-	if (boot_cpu_has(X86_FEATURE_PERFCTR_L2))
-		perf_pmu_unregister(&amd_l2_pmu);
+fail_start:
+	cpuhp_remove_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING);
+fail_prep:
+	cpuhp_remove_state(CPUHP_PERF_X86_AMD_UNCORE_PREP);
 fail_l2:
 	if (boot_cpu_has(X86_FEATURE_PERFCTR_NB))
 		perf_pmu_unregister(&amd_nb_pmu);
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index dfebbde2..c17f0de 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -1477,49 +1477,49 @@
 struct event_constraint emptyconstraint;
 struct event_constraint unconstrained;
 
-static int
-x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
+static int x86_pmu_prepare_cpu(unsigned int cpu)
 {
-	unsigned int cpu = (long)hcpu;
 	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
-	int i, ret = NOTIFY_OK;
+	int i;
 
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_UP_PREPARE:
-		for (i = 0 ; i < X86_PERF_KFREE_MAX; i++)
-			cpuc->kfree_on_online[i] = NULL;
-		if (x86_pmu.cpu_prepare)
-			ret = x86_pmu.cpu_prepare(cpu);
-		break;
+	for (i = 0 ; i < X86_PERF_KFREE_MAX; i++)
+		cpuc->kfree_on_online[i] = NULL;
+	if (x86_pmu.cpu_prepare)
+		return x86_pmu.cpu_prepare(cpu);
+	return 0;
+}
 
-	case CPU_STARTING:
-		if (x86_pmu.cpu_starting)
-			x86_pmu.cpu_starting(cpu);
-		break;
+static int x86_pmu_dead_cpu(unsigned int cpu)
+{
+	if (x86_pmu.cpu_dead)
+		x86_pmu.cpu_dead(cpu);
+	return 0;
+}
 
-	case CPU_ONLINE:
-		for (i = 0 ; i < X86_PERF_KFREE_MAX; i++) {
-			kfree(cpuc->kfree_on_online[i]);
-			cpuc->kfree_on_online[i] = NULL;
-		}
-		break;
+static int x86_pmu_online_cpu(unsigned int cpu)
+{
+	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+	int i;
 
-	case CPU_DYING:
-		if (x86_pmu.cpu_dying)
-			x86_pmu.cpu_dying(cpu);
-		break;
-
-	case CPU_UP_CANCELED:
-	case CPU_DEAD:
-		if (x86_pmu.cpu_dead)
-			x86_pmu.cpu_dead(cpu);
-		break;
-
-	default:
-		break;
+	for (i = 0 ; i < X86_PERF_KFREE_MAX; i++) {
+		kfree(cpuc->kfree_on_online[i]);
+		cpuc->kfree_on_online[i] = NULL;
 	}
+	return 0;
+}
 
-	return ret;
+static int x86_pmu_starting_cpu(unsigned int cpu)
+{
+	if (x86_pmu.cpu_starting)
+		x86_pmu.cpu_starting(cpu);
+	return 0;
+}
+
+static int x86_pmu_dying_cpu(unsigned int cpu)
+{
+	if (x86_pmu.cpu_dying)
+		x86_pmu.cpu_dying(cpu);
+	return 0;
 }
 
 static void __init pmu_check_apic(void)
@@ -1787,10 +1787,39 @@
 	pr_info("... fixed-purpose events:   %d\n",     x86_pmu.num_counters_fixed);
 	pr_info("... event mask:             %016Lx\n", x86_pmu.intel_ctrl);
 
-	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
-	perf_cpu_notifier(x86_pmu_notifier);
+	/*
+	 * Install callbacks. Core will call them for each online
+	 * cpu.
+	 */
+	err = cpuhp_setup_state(CPUHP_PERF_X86_PREPARE, "PERF_X86_PREPARE",
+				x86_pmu_prepare_cpu, x86_pmu_dead_cpu);
+	if (err)
+		return err;
+
+	err = cpuhp_setup_state(CPUHP_AP_PERF_X86_STARTING,
+				"AP_PERF_X86_STARTING", x86_pmu_starting_cpu,
+				x86_pmu_dying_cpu);
+	if (err)
+		goto out;
+
+	err = cpuhp_setup_state(CPUHP_AP_PERF_X86_ONLINE, "AP_PERF_X86_ONLINE",
+				x86_pmu_online_cpu, NULL);
+	if (err)
+		goto out1;
+
+	err = perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
+	if (err)
+		goto out2;
 
 	return 0;
+
+out2:
+	cpuhp_remove_state(CPUHP_AP_PERF_X86_ONLINE);
+out1:
+	cpuhp_remove_state(CPUHP_AP_PERF_X86_STARTING);
+out:
+	cpuhp_remove_state(CPUHP_PERF_X86_PREPARE);
+	return err;
 }
 early_initcall(init_hw_perf_events);
 
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 0974ba1..2cbde2f 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3109,7 +3109,7 @@
 		cpuc->excl_thread_id = 0;
 	}
 
-	return NOTIFY_OK;
+	return 0;
 
 err_constraint_list:
 	kfree(cpuc->constraint_list);
@@ -3120,7 +3120,7 @@
 	cpuc->shared_regs = NULL;
 
 err:
-	return NOTIFY_BAD;
+	return -ENOMEM;
 }
 
 static void intel_pmu_cpu_starting(int cpu)
diff --git a/arch/x86/events/intel/cqm.c b/arch/x86/events/intel/cqm.c
index 7b5fd81..783c49d 100644
--- a/arch/x86/events/intel/cqm.c
+++ b/arch/x86/events/intel/cqm.c
@@ -1577,7 +1577,7 @@
 		cpumask_set_cpu(cpu, &cqm_cpumask);
 }
 
-static void intel_cqm_cpu_starting(unsigned int cpu)
+static int intel_cqm_cpu_starting(unsigned int cpu)
 {
 	struct intel_pqr_state *state = &per_cpu(pqr_state, cpu);
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
@@ -1588,39 +1588,26 @@
 
 	WARN_ON(c->x86_cache_max_rmid != cqm_max_rmid);
 	WARN_ON(c->x86_cache_occ_scale != cqm_l3_scale);
+
+	cqm_pick_event_reader(cpu);
+	return 0;
 }
 
-static void intel_cqm_cpu_exit(unsigned int cpu)
+static int intel_cqm_cpu_exit(unsigned int cpu)
 {
 	int target;
 
 	/* Is @cpu the current cqm reader for this package ? */
 	if (!cpumask_test_and_clear_cpu(cpu, &cqm_cpumask))
-		return;
+		return 0;
 
 	/* Find another online reader in this package */
 	target = cpumask_any_but(topology_core_cpumask(cpu), cpu);
 
 	if (target < nr_cpu_ids)
 		cpumask_set_cpu(target, &cqm_cpumask);
-}
 
-static int intel_cqm_cpu_notifier(struct notifier_block *nb,
-				  unsigned long action, void *hcpu)
-{
-	unsigned int cpu  = (unsigned long)hcpu;
-
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_DOWN_PREPARE:
-		intel_cqm_cpu_exit(cpu);
-		break;
-	case CPU_STARTING:
-		intel_cqm_cpu_starting(cpu);
-		cqm_pick_event_reader(cpu);
-		break;
-	}
-
-	return NOTIFY_OK;
+	return 0;
 }
 
 static const struct x86_cpu_id intel_cqm_match[] = {
@@ -1682,7 +1669,7 @@
 static int __init intel_cqm_init(void)
 {
 	char *str = NULL, scale[20];
-	int i, cpu, ret;
+	int cpu, ret;
 
 	if (x86_match_cpu(intel_cqm_match))
 		cqm_enabled = true;
@@ -1705,8 +1692,7 @@
 	 *
 	 * Also, check that the scales match on all cpus.
 	 */
-	cpu_notifier_register_begin();
-
+	get_online_cpus();
 	for_each_online_cpu(cpu) {
 		struct cpuinfo_x86 *c = &cpu_data(cpu);
 
@@ -1743,11 +1729,6 @@
 	if (ret)
 		goto out;
 
-	for_each_online_cpu(i) {
-		intel_cqm_cpu_starting(i);
-		cqm_pick_event_reader(i);
-	}
-
 	if (mbm_enabled)
 		ret = intel_mbm_init();
 	if (ret && !cqm_enabled)
@@ -1772,12 +1753,18 @@
 		pr_info("Intel MBM enabled\n");
 
 	/*
-	 * Register the hot cpu notifier once we are sure cqm
+	 * Setup the hot cpu notifier once we are sure cqm
 	 * is enabled to avoid notifier leak.
 	 */
-	__perf_cpu_notifier(intel_cqm_cpu_notifier);
+	cpuhp_setup_state(CPUHP_AP_PERF_X86_CQM_STARTING,
+			  "AP_PERF_X86_CQM_STARTING",
+			  intel_cqm_cpu_starting, NULL);
+	cpuhp_setup_state(CPUHP_AP_PERF_X86_CQM_ONLINE, "AP_PERF_X86_CQM_ONLINE",
+			  NULL, intel_cqm_cpu_exit);
+
 out:
-	cpu_notifier_register_done();
+	put_online_cpus();
+
 	if (ret) {
 		kfree(str);
 		cqm_cleanup();
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index 4c7638b..3ca87b5 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -366,7 +366,7 @@
  * Check if exiting cpu is the designated reader. If so migrate the
  * events when there is a valid target available
  */
-static void cstate_cpu_exit(int cpu)
+static int cstate_cpu_exit(unsigned int cpu)
 {
 	unsigned int target;
 
@@ -391,9 +391,10 @@
 			perf_pmu_migrate_context(&cstate_pkg_pmu, cpu, target);
 		}
 	}
+	return 0;
 }
 
-static void cstate_cpu_init(int cpu)
+static int cstate_cpu_init(unsigned int cpu)
 {
 	unsigned int target;
 
@@ -415,31 +416,10 @@
 				 topology_core_cpumask(cpu));
 	if (has_cstate_pkg && target >= nr_cpu_ids)
 		cpumask_set_cpu(cpu, &cstate_pkg_cpu_mask);
+
+	return 0;
 }
 
-static int cstate_cpu_notifier(struct notifier_block *self,
-			       unsigned long action, void *hcpu)
-{
-	unsigned int cpu = (long)hcpu;
-
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_STARTING:
-		cstate_cpu_init(cpu);
-		break;
-	case CPU_DOWN_PREPARE:
-		cstate_cpu_exit(cpu);
-		break;
-	default:
-		break;
-	}
-	return NOTIFY_OK;
-}
-
-static struct notifier_block cstate_cpu_nb = {
-	.notifier_call	= cstate_cpu_notifier,
-	.priority       = CPU_PRI_PERF + 1,
-};
-
 static struct pmu cstate_core_pmu = {
 	.attr_groups	= core_attr_groups,
 	.name		= "cstate_core",
@@ -600,18 +580,20 @@
 
 static int __init cstate_init(void)
 {
-	int cpu, err;
+	int err;
 
-	cpu_notifier_register_begin();
-	for_each_online_cpu(cpu)
-		cstate_cpu_init(cpu);
+	cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_STARTING,
+			  "AP_PERF_X86_CSTATE_STARTING", cstate_cpu_init,
+			  NULL);
+	cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_ONLINE,
+			  "AP_PERF_X86_CSTATE_ONLINE", NULL, cstate_cpu_exit);
 
 	if (has_cstate_core) {
 		err = perf_pmu_register(&cstate_core_pmu, cstate_core_pmu.name, -1);
 		if (err) {
 			has_cstate_core = false;
 			pr_info("Failed to register cstate core pmu\n");
-			goto out;
+			return err;
 		}
 	}
 
@@ -621,12 +603,10 @@
 			has_cstate_pkg = false;
 			pr_info("Failed to register cstate pkg pmu\n");
 			cstate_cleanup();
-			goto out;
+			return err;
 		}
 	}
-	__register_cpu_notifier(&cstate_cpu_nb);
-out:
-	cpu_notifier_register_done();
+
 	return err;
 }
 
@@ -652,9 +632,8 @@
 
 static void __exit cstate_pmu_exit(void)
 {
-	cpu_notifier_register_begin();
-	__unregister_cpu_notifier(&cstate_cpu_nb);
+	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_ONLINE);
+	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_STARTING);
 	cstate_cleanup();
-	cpu_notifier_register_done();
 }
 module_exit(cstate_pmu_exit);
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
index d0c58b3..2886593 100644
--- a/arch/x86/events/intel/rapl.c
+++ b/arch/x86/events/intel/rapl.c
@@ -556,14 +556,14 @@
 	NULL,
 };
 
-static void rapl_cpu_exit(int cpu)
+static int rapl_cpu_offline(unsigned int cpu)
 {
 	struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
 	int target;
 
 	/* Check if exiting cpu is used for collecting rapl events */
 	if (!cpumask_test_and_clear_cpu(cpu, &rapl_cpu_mask))
-		return;
+		return 0;
 
 	pmu->cpu = -1;
 	/* Find a new cpu to collect rapl events */
@@ -575,9 +575,10 @@
 		pmu->cpu = target;
 		perf_pmu_migrate_context(pmu->pmu, cpu, target);
 	}
+	return 0;
 }
 
-static void rapl_cpu_init(int cpu)
+static int rapl_cpu_online(unsigned int cpu)
 {
 	struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
 	int target;
@@ -588,13 +589,14 @@
 	 */
 	target = cpumask_any_and(&rapl_cpu_mask, topology_core_cpumask(cpu));
 	if (target < nr_cpu_ids)
-		return;
+		return 0;
 
 	cpumask_set_cpu(cpu, &rapl_cpu_mask);
 	pmu->cpu = cpu;
+	return 0;
 }
 
-static int rapl_cpu_prepare(int cpu)
+static int rapl_cpu_prepare(unsigned int cpu)
 {
 	struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
 
@@ -615,33 +617,6 @@
 	return 0;
 }
 
-static int rapl_cpu_notifier(struct notifier_block *self,
-			     unsigned long action, void *hcpu)
-{
-	unsigned int cpu = (long)hcpu;
-
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_UP_PREPARE:
-		rapl_cpu_prepare(cpu);
-		break;
-
-	case CPU_DOWN_FAILED:
-	case CPU_ONLINE:
-		rapl_cpu_init(cpu);
-		break;
-
-	case CPU_DOWN_PREPARE:
-		rapl_cpu_exit(cpu);
-		break;
-	}
-	return NOTIFY_OK;
-}
-
-static struct notifier_block rapl_cpu_nb = {
-	.notifier_call	= rapl_cpu_notifier,
-	.priority       = CPU_PRI_PERF + 1,
-};
-
 static int rapl_check_hw_unit(bool apply_quirk)
 {
 	u64 msr_rapl_power_unit_bits;
@@ -692,24 +667,6 @@
 	}
 }
 
-static int __init rapl_prepare_cpus(void)
-{
-	unsigned int cpu, pkg;
-	int ret;
-
-	for_each_online_cpu(cpu) {
-		pkg = topology_logical_package_id(cpu);
-		if (rapl_pmus->pmus[pkg])
-			continue;
-
-		ret = rapl_cpu_prepare(cpu);
-		if (ret)
-			return ret;
-		rapl_cpu_init(cpu);
-	}
-	return 0;
-}
-
 static void cleanup_rapl_pmus(void)
 {
 	int i;
@@ -837,35 +794,44 @@
 	if (ret)
 		return ret;
 
-	cpu_notifier_register_begin();
+	/*
+	 * Install callbacks. Core will call them for each online cpu.
+	 */
 
-	ret = rapl_prepare_cpus();
+	ret = cpuhp_setup_state(CPUHP_PERF_X86_RAPL_PREP, "PERF_X86_RAPL_PREP",
+				rapl_cpu_prepare, NULL);
 	if (ret)
 		goto out;
 
+	ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE,
+				"AP_PERF_X86_RAPL_ONLINE",
+				rapl_cpu_online, rapl_cpu_offline);
+	if (ret)
+		goto out1;
+
 	ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1);
 	if (ret)
-		goto out;
+		goto out2;
 
-	__register_cpu_notifier(&rapl_cpu_nb);
-	cpu_notifier_register_done();
 	rapl_advertise();
 	return 0;
 
+out2:
+	cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
+out1:
+	cpuhp_remove_state(CPUHP_PERF_X86_RAPL_PREP);
 out:
 	pr_warn("Initialization failed (%d), disabled\n", ret);
 	cleanup_rapl_pmus();
-	cpu_notifier_register_done();
 	return ret;
 }
 module_init(rapl_pmu_init);
 
 static void __exit intel_rapl_exit(void)
 {
-	cpu_notifier_register_begin();
-	__unregister_cpu_notifier(&rapl_cpu_nb);
+	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_RAPL_ONLINE);
+	cpuhp_remove_state_nocalls(CPUHP_PERF_X86_RAPL_PREP);
 	perf_pmu_unregister(&rapl_pmus->pmu);
 	cleanup_rapl_pmus();
-	cpu_notifier_register_done();
 }
 module_exit(intel_rapl_exit);
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index 59b4974..3f3d0d6 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -1052,7 +1052,7 @@
 	}
 }
 
-static void uncore_cpu_dying(int cpu)
+static int uncore_cpu_dying(unsigned int cpu)
 {
 	struct intel_uncore_type *type, **types = uncore_msr_uncores;
 	struct intel_uncore_pmu *pmu;
@@ -1069,16 +1069,19 @@
 				uncore_box_exit(box);
 		}
 	}
+	return 0;
 }
 
-static void uncore_cpu_starting(int cpu, bool init)
+static int first_init;
+
+static int uncore_cpu_starting(unsigned int cpu)
 {
 	struct intel_uncore_type *type, **types = uncore_msr_uncores;
 	struct intel_uncore_pmu *pmu;
 	struct intel_uncore_box *box;
 	int i, pkg, ncpus = 1;
 
-	if (init) {
+	if (first_init) {
 		/*
 		 * On init we get the number of online cpus in the package
 		 * and set refcount for all of them.
@@ -1099,9 +1102,11 @@
 				uncore_box_init(box);
 		}
 	}
+
+	return 0;
 }
 
-static int uncore_cpu_prepare(int cpu)
+static int uncore_cpu_prepare(unsigned int cpu)
 {
 	struct intel_uncore_type *type, **types = uncore_msr_uncores;
 	struct intel_uncore_pmu *pmu;
@@ -1164,13 +1169,13 @@
 		uncore_change_type_ctx(*uncores, old_cpu, new_cpu);
 }
 
-static void uncore_event_exit_cpu(int cpu)
+static int uncore_event_cpu_offline(unsigned int cpu)
 {
 	int target;
 
 	/* Check if exiting cpu is used for collecting uncore events */
 	if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
-		return;
+		return 0;
 
 	/* Find a new cpu to collect uncore events */
 	target = cpumask_any_but(topology_core_cpumask(cpu), cpu);
@@ -1183,9 +1188,10 @@
 
 	uncore_change_context(uncore_msr_uncores, cpu, target);
 	uncore_change_context(uncore_pci_uncores, cpu, target);
+	return 0;
 }
 
-static void uncore_event_init_cpu(int cpu)
+static int uncore_event_cpu_online(unsigned int cpu)
 {
 	int target;
 
@@ -1195,50 +1201,15 @@
 	 */
 	target = cpumask_any_and(&uncore_cpu_mask, topology_core_cpumask(cpu));
 	if (target < nr_cpu_ids)
-		return;
+		return 0;
 
 	cpumask_set_cpu(cpu, &uncore_cpu_mask);
 
 	uncore_change_context(uncore_msr_uncores, -1, cpu);
 	uncore_change_context(uncore_pci_uncores, -1, cpu);
+	return 0;
 }
 
-static int uncore_cpu_notifier(struct notifier_block *self,
-			       unsigned long action, void *hcpu)
-{
-	unsigned int cpu = (long)hcpu;
-
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_UP_PREPARE:
-		return notifier_from_errno(uncore_cpu_prepare(cpu));
-
-	case CPU_STARTING:
-		uncore_cpu_starting(cpu, false);
-	case CPU_DOWN_FAILED:
-		uncore_event_init_cpu(cpu);
-		break;
-
-	case CPU_UP_CANCELED:
-	case CPU_DYING:
-		uncore_cpu_dying(cpu);
-		break;
-
-	case CPU_DOWN_PREPARE:
-		uncore_event_exit_cpu(cpu);
-		break;
-	}
-	return NOTIFY_OK;
-}
-
-static struct notifier_block uncore_cpu_nb = {
-	.notifier_call	= uncore_cpu_notifier,
-	/*
-	 * to migrate uncore events, our notifier should be executed
-	 * before perf core's notifier.
-	 */
-	.priority	= CPU_PRI_PERF + 1,
-};
-
 static int __init type_pmu_register(struct intel_uncore_type *type)
 {
 	int i, ret;
@@ -1282,41 +1253,6 @@
 	return ret;
 }
 
-static void __init uncore_cpu_setup(void *dummy)
-{
-	uncore_cpu_starting(smp_processor_id(), true);
-}
-
-/* Lazy to avoid allocation of a few bytes for the normal case */
-static __initdata DECLARE_BITMAP(packages, MAX_LOCAL_APIC);
-
-static int __init uncore_cpumask_init(bool msr)
-{
-	unsigned int cpu;
-
-	for_each_online_cpu(cpu) {
-		unsigned int pkg = topology_logical_package_id(cpu);
-		int ret;
-
-		if (test_and_set_bit(pkg, packages))
-			continue;
-		/*
-		 * The first online cpu of each package allocates and takes
-		 * the refcounts for all other online cpus in that package.
-		 * If msrs are not enabled no allocation is required.
-		 */
-		if (msr) {
-			ret = uncore_cpu_prepare(cpu);
-			if (ret)
-				return ret;
-		}
-		uncore_event_init_cpu(cpu);
-		smp_call_function_single(cpu, uncore_cpu_setup, NULL, 1);
-	}
-	__register_cpu_notifier(&uncore_cpu_nb);
-	return 0;
-}
-
 #define X86_UNCORE_MODEL_MATCH(model, init)	\
 	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&init }
 
@@ -1440,11 +1376,33 @@
 	if (cret && pret)
 		return -ENODEV;
 
-	cpu_notifier_register_begin();
-	ret = uncore_cpumask_init(!cret);
-	if (ret)
-		goto err;
-	cpu_notifier_register_done();
+	/*
+	 * Install callbacks. Core will call them for each online cpu.
+	 *
+	 * The first online cpu of each package allocates and takes
+	 * the refcounts for all other online cpus in that package.
+	 * If msrs are not enabled no allocation is required and
+	 * uncore_cpu_prepare() is not called for each online cpu.
+	 */
+	if (!cret) {
+	       ret = cpuhp_setup_state(CPUHP_PERF_X86_UNCORE_PREP,
+					"PERF_X86_UNCORE_PREP",
+					uncore_cpu_prepare, NULL);
+		if (ret)
+			goto err;
+	} else {
+		cpuhp_setup_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP,
+					  "PERF_X86_UNCORE_PREP",
+					  uncore_cpu_prepare, NULL);
+	}
+	first_init = 1;
+	cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_STARTING,
+			  "AP_PERF_X86_UNCORE_STARTING",
+			  uncore_cpu_starting, uncore_cpu_dying);
+	first_init = 0;
+	cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
+			  "AP_PERF_X86_UNCORE_ONLINE",
+			  uncore_event_cpu_online, uncore_event_cpu_offline);
 	return 0;
 
 err:
@@ -1452,17 +1410,16 @@
 	on_each_cpu_mask(&uncore_cpu_mask, uncore_exit_boxes, NULL, 1);
 	uncore_types_exit(uncore_msr_uncores);
 	uncore_pci_exit();
-	cpu_notifier_register_done();
 	return ret;
 }
 module_init(intel_uncore_init);
 
 static void __exit intel_uncore_exit(void)
 {
-	cpu_notifier_register_begin();
-	__unregister_cpu_notifier(&uncore_cpu_nb);
+	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
+	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_STARTING);
+	cpuhp_remove_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP);
 	uncore_types_exit(uncore_msr_uncores);
 	uncore_pci_exit();
-	cpu_notifier_register_done();
 }
 module_exit(intel_uncore_exit);
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index cefacba..456316f 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -215,26 +215,18 @@
  * cpu timers during the offline process due to the ordering of notification.
  * the extra interrupt is harmless.
  */
-static int apbt_cpuhp_notify(struct notifier_block *n,
-			     unsigned long action, void *hcpu)
+static int apbt_cpu_dead(unsigned int cpu)
 {
-	unsigned long cpu = (unsigned long)hcpu;
 	struct apbt_dev *adev = &per_cpu(cpu_apbt_dev, cpu);
 
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_DEAD:
-		dw_apb_clockevent_pause(adev->timer);
-		if (system_state == SYSTEM_RUNNING) {
-			pr_debug("skipping APBT CPU %lu offline\n", cpu);
-		} else {
-			pr_debug("APBT clockevent for cpu %lu offline\n", cpu);
-			dw_apb_clockevent_stop(adev->timer);
-		}
-		break;
-	default:
-		pr_debug("APBT notified %lu, no action\n", action);
+	dw_apb_clockevent_pause(adev->timer);
+	if (system_state == SYSTEM_RUNNING) {
+		pr_debug("skipping APBT CPU %u offline\n", cpu);
+	} else {
+		pr_debug("APBT clockevent for cpu %u offline\n", cpu);
+		dw_apb_clockevent_stop(adev->timer);
 	}
-	return NOTIFY_OK;
+	return 0;
 }
 
 static __init int apbt_late_init(void)
@@ -242,9 +234,8 @@
 	if (intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT ||
 		!apb_timer_block_enabled)
 		return 0;
-	/* This notifier should be called after workqueue is ready */
-	hotcpu_notifier(apbt_cpuhp_notify, -20);
-	return 0;
+	return cpuhp_setup_state(CPUHP_X86_APB_DEAD, "X86_APB_DEAD", NULL,
+				 apbt_cpu_dead);
 }
 fs_initcall(apbt_late_init);
 #else
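
apbt_late_init() above is the degenerate case: only a teardown hook is needed. Passing NULL as the startup callback and a real function as the teardown gives the equivalent of the old CPU_DEAD notifier. A hedged sketch with a hypothetical callback name:

/* Sketch only; demo_cpu_dead() is a placeholder. */
static int demo_cpu_dead(unsigned int cpu)
{
	/* runs on a control CPU after @cpu has gone fully offline */
	return 0;
}

static __init int demo_late_init(void)
{
	return cpuhp_setup_state(CPUHP_X86_APB_DEAD, "X86_APB_DEAD",
				 NULL, demo_cpu_dead);
}
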
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index 24170d0..6368fa6 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -152,68 +152,48 @@
 	}
 }
 
- /*
-  * At CPU state changes, update the x2apic cluster sibling info.
-  */
-static int
-update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
+/*
+ * At CPU state changes, update the x2apic cluster sibling info.
+ */
+int x2apic_prepare_cpu(unsigned int cpu)
 {
-	unsigned int this_cpu = (unsigned long)hcpu;
-	unsigned int cpu;
-	int err = 0;
+	if (!zalloc_cpumask_var(&per_cpu(cpus_in_cluster, cpu), GFP_KERNEL))
+		return -ENOMEM;
 
-	switch (action) {
-	case CPU_UP_PREPARE:
-		if (!zalloc_cpumask_var(&per_cpu(cpus_in_cluster, this_cpu),
-					GFP_KERNEL)) {
-			err = -ENOMEM;
-		} else if (!zalloc_cpumask_var(&per_cpu(ipi_mask, this_cpu),
-					       GFP_KERNEL)) {
-			free_cpumask_var(per_cpu(cpus_in_cluster, this_cpu));
-			err = -ENOMEM;
-		}
-		break;
-	case CPU_UP_CANCELED:
-	case CPU_UP_CANCELED_FROZEN:
-	case CPU_DEAD:
-		for_each_online_cpu(cpu) {
-			if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu))
-				continue;
-			cpumask_clear_cpu(this_cpu, per_cpu(cpus_in_cluster, cpu));
-			cpumask_clear_cpu(cpu, per_cpu(cpus_in_cluster, this_cpu));
-		}
-		free_cpumask_var(per_cpu(cpus_in_cluster, this_cpu));
-		free_cpumask_var(per_cpu(ipi_mask, this_cpu));
-		break;
+	if (!zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL)) {
+		free_cpumask_var(per_cpu(cpus_in_cluster, cpu));
+		return -ENOMEM;
 	}
 
-	return notifier_from_errno(err);
+	return 0;
 }
 
-static struct notifier_block x2apic_cpu_notifier = {
-	.notifier_call = update_clusterinfo,
-};
-
-static int x2apic_init_cpu_notifier(void)
+int x2apic_dead_cpu(unsigned int this_cpu)
 {
-	int cpu = smp_processor_id();
+	int cpu;
 
-	zalloc_cpumask_var(&per_cpu(cpus_in_cluster, cpu), GFP_KERNEL);
-	zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL);
-
-	BUG_ON(!per_cpu(cpus_in_cluster, cpu) || !per_cpu(ipi_mask, cpu));
-
-	cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, cpu));
-	register_hotcpu_notifier(&x2apic_cpu_notifier);
-	return 1;
+	for_each_online_cpu(cpu) {
+		if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu))
+			continue;
+		cpumask_clear_cpu(this_cpu, per_cpu(cpus_in_cluster, cpu));
+		cpumask_clear_cpu(cpu, per_cpu(cpus_in_cluster, this_cpu));
+	}
+	free_cpumask_var(per_cpu(cpus_in_cluster, this_cpu));
+	free_cpumask_var(per_cpu(ipi_mask, this_cpu));
+	return 0;
 }
 
 static int x2apic_cluster_probe(void)
 {
-	if (x2apic_mode)
-		return x2apic_init_cpu_notifier();
-	else
+	int cpu = smp_processor_id();
+
+	if (!x2apic_mode)
 		return 0;
+
+	cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, cpu));
+	cpuhp_setup_state(CPUHP_X2APIC_PREPARE, "X2APIC_PREPARE",
+			  x2apic_prepare_cpu, x2apic_dead_cpu);
+	return 1;
 }
 
 static const struct cpumask *x2apic_cluster_target_cpus(void)
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index f112af7..3d74707 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -710,31 +710,29 @@
 	complete(&hpet_work->complete);
 }
 
-static int hpet_cpuhp_notify(struct notifier_block *n,
-		unsigned long action, void *hcpu)
+static int hpet_cpuhp_online(unsigned int cpu)
 {
-	unsigned long cpu = (unsigned long)hcpu;
 	struct hpet_work_struct work;
+
+	INIT_DELAYED_WORK_ONSTACK(&work.work, hpet_work);
+	init_completion(&work.complete);
+	/* FIXME: add schedule_work_on() */
+	schedule_delayed_work_on(cpu, &work.work, 0);
+	wait_for_completion(&work.complete);
+	destroy_delayed_work_on_stack(&work.work);
+	return 0;
+}
+
+static int hpet_cpuhp_dead(unsigned int cpu)
+{
 	struct hpet_dev *hdev = per_cpu(cpu_hpet_dev, cpu);
 
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_ONLINE:
-		INIT_DELAYED_WORK_ONSTACK(&work.work, hpet_work);
-		init_completion(&work.complete);
-		/* FIXME: add schedule_work_on() */
-		schedule_delayed_work_on(cpu, &work.work, 0);
-		wait_for_completion(&work.complete);
-		destroy_delayed_work_on_stack(&work.work);
-		break;
-	case CPU_DEAD:
-		if (hdev) {
-			free_irq(hdev->irq, hdev);
-			hdev->flags &= ~HPET_DEV_USED;
-			per_cpu(cpu_hpet_dev, cpu) = NULL;
-		}
-		break;
-	}
-	return NOTIFY_OK;
+	if (!hdev)
+		return 0;
+	free_irq(hdev->irq, hdev);
+	hdev->flags &= ~HPET_DEV_USED;
+	per_cpu(cpu_hpet_dev, cpu) = NULL;
+	return 0;
 }
 #else
 
@@ -750,11 +748,8 @@
 }
 #endif
 
-static int hpet_cpuhp_notify(struct notifier_block *n,
-		unsigned long action, void *hcpu)
-{
-	return NOTIFY_OK;
-}
+#define hpet_cpuhp_online	NULL
+#define hpet_cpuhp_dead		NULL
 
 #endif
 
@@ -931,7 +926,7 @@
  */
 static __init int hpet_late_init(void)
 {
-	int cpu;
+	int ret;
 
 	if (boot_hpet_disable)
 		return -ENODEV;
@@ -961,16 +956,20 @@
 	if (boot_cpu_has(X86_FEATURE_ARAT))
 		return 0;
 
-	cpu_notifier_register_begin();
-	for_each_online_cpu(cpu) {
-		hpet_cpuhp_notify(NULL, CPU_ONLINE, (void *)(long)cpu);
-	}
-
 	/* This notifier should be called after workqueue is ready */
-	__hotcpu_notifier(hpet_cpuhp_notify, -20);
-	cpu_notifier_register_done();
-
+	ret = cpuhp_setup_state(CPUHP_AP_X86_HPET_ONLINE, "AP_X86_HPET_ONLINE",
+				hpet_cpuhp_online, NULL);
+	if (ret)
+		return ret;
+	ret = cpuhp_setup_state(CPUHP_X86_HPET_DEAD, "X86_HPET_DEAD", NULL,
+				hpet_cpuhp_dead);
+	if (ret)
+		goto err_cpuhp;
 	return 0;
+
+err_cpuhp:
+	cpuhp_remove_state(CPUHP_AP_X86_HPET_ONLINE);
+	return ret;
 }
 fs_initcall(hpet_late_init);
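
When a driver installs two independent states, as hpet_late_init() now does, a failure of the second registration has to unwind the first. A reduced error-handling sketch along the lines of the hunk above, using the real state constants but placeholder callbacks:

/* Sketch only; demo_online()/demo_dead() stand in for the hpet callbacks. */
static int demo_online(unsigned int cpu) { return 0; }
static int demo_dead(unsigned int cpu)   { return 0; }

static __init int demo_late_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_X86_HPET_ONLINE, "AP_X86_HPET_ONLINE",
				demo_online, NULL);
	if (ret)
		return ret;

	ret = cpuhp_setup_state(CPUHP_X86_HPET_DEAD, "X86_HPET_DEAD",
				NULL, demo_dead);
	if (ret)
		cpuhp_remove_state(CPUHP_AP_X86_HPET_ONLINE);
	return ret;
}
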
 
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index 9b0185f..654f6c6 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -323,25 +323,16 @@
 	return !(atomic_read((atomic_t *)&tboot->num_in_wfs) == num_aps);
 }
 
-static int tboot_cpu_callback(struct notifier_block *nfb, unsigned long action,
-			      void *hcpu)
+static int tboot_dying_cpu(unsigned int cpu)
 {
-	switch (action) {
-	case CPU_DYING:
-		atomic_inc(&ap_wfs_count);
-		if (num_online_cpus() == 1)
-			if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
-				return NOTIFY_BAD;
-		break;
+	atomic_inc(&ap_wfs_count);
+	if (num_online_cpus() == 1) {
+		if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
+			return -EBUSY;
 	}
-	return NOTIFY_OK;
+	return 0;
 }
 
-static struct notifier_block tboot_cpu_notifier =
-{
-	.notifier_call = tboot_cpu_callback,
-};
-
 #ifdef CONFIG_DEBUG_FS
 
 #define TBOOT_LOG_UUID	{ 0x26, 0x25, 0x19, 0xc0, 0x30, 0x6b, 0xb4, 0x4d, \
@@ -417,8 +408,8 @@
 	tboot_create_trampoline();
 
 	atomic_set(&ap_wfs_count, 0);
-	register_hotcpu_notifier(&tboot_cpu_notifier);
-
+	cpuhp_setup_state(CPUHP_AP_X86_TBOOT_DYING, "AP_X86_TBOOT_DYING", NULL,
+			  tboot_dying_cpu);
 #ifdef CONFIG_DEBUG_FS
 	debugfs_create_file("tboot_log", S_IRUSR,
 			arch_debugfs_dir, NULL, &tboot_log_fops);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b276672..45608a7 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5552,9 +5552,10 @@
 }
 EXPORT_SYMBOL_GPL(kvm_fast_pio_out);
 
-static void tsc_bad(void *info)
+static int kvmclock_cpu_down_prep(unsigned int cpu)
 {
 	__this_cpu_write(cpu_tsc_khz, 0);
+	return 0;
 }
 
 static void tsc_khz_changed(void *data)
@@ -5659,35 +5660,18 @@
 	.notifier_call  = kvmclock_cpufreq_notifier
 };
 
-static int kvmclock_cpu_notifier(struct notifier_block *nfb,
-					unsigned long action, void *hcpu)
+static int kvmclock_cpu_online(unsigned int cpu)
 {
-	unsigned int cpu = (unsigned long)hcpu;
-
-	switch (action) {
-		case CPU_ONLINE:
-		case CPU_DOWN_FAILED:
-			smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
-			break;
-		case CPU_DOWN_PREPARE:
-			smp_call_function_single(cpu, tsc_bad, NULL, 1);
-			break;
-	}
-	return NOTIFY_OK;
+	tsc_khz_changed(NULL);
+	return 0;
 }
 
-static struct notifier_block kvmclock_cpu_notifier_block = {
-	.notifier_call  = kvmclock_cpu_notifier,
-	.priority = -INT_MAX
-};
-
 static void kvm_timer_init(void)
 {
 	int cpu;
 
 	max_tsc_khz = tsc_khz;
 
-	cpu_notifier_register_begin();
 	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
 #ifdef CONFIG_CPU_FREQ
 		struct cpufreq_policy policy;
@@ -5702,12 +5686,9 @@
 					  CPUFREQ_TRANSITION_NOTIFIER);
 	}
 	pr_debug("kvm: max_tsc_khz = %ld\n", max_tsc_khz);
-	for_each_online_cpu(cpu)
-		smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
 
-	__register_hotcpu_notifier(&kvmclock_cpu_notifier_block);
-	cpu_notifier_register_done();
-
+	cpuhp_setup_state(CPUHP_AP_X86_KVM_CLK_ONLINE, "AP_X86_KVM_CLK_ONLINE",
+			  kvmclock_cpu_online, kvmclock_cpu_down_prep);
 }
 
 static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
@@ -5896,7 +5877,7 @@
 	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
 		cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
 					    CPUFREQ_TRANSITION_NOTIFIER);
-	unregister_hotcpu_notifier(&kvmclock_cpu_notifier_block);
+	cpuhp_remove_state_nocalls(CPUHP_AP_X86_KVM_CLK_ONLINE);
 #ifdef CONFIG_X86_64
 	pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier);
 #endif
diff --git a/arch/xtensa/kernel/perf_event.c b/arch/xtensa/kernel/perf_event.c
index ef90479..0fecc8a 100644
--- a/arch/xtensa/kernel/perf_event.c
+++ b/arch/xtensa/kernel/perf_event.c
@@ -404,7 +404,7 @@
 	.read = xtensa_pmu_read,
 };
 
-static void xtensa_pmu_setup(void)
+static int xtensa_pmu_setup(int cpu)
 {
 	unsigned i;
 
@@ -413,21 +413,7 @@
 		set_er(0, XTENSA_PMU_PMCTRL(i));
 		set_er(get_er(XTENSA_PMU_PMSTAT(i)), XTENSA_PMU_PMSTAT(i));
 	}
-}
-
-static int xtensa_pmu_notifier(struct notifier_block *self,
-			       unsigned long action, void *data)
-{
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_STARTING:
-		xtensa_pmu_setup();
-		break;
-
-	default:
-		break;
-	}
-
-	return NOTIFY_OK;
+	return 0;
 }
 
 static int __init xtensa_pmu_init(void)
@@ -435,7 +421,13 @@
 	int ret;
 	int irq = irq_create_mapping(NULL, XCHAL_PROFILING_INTERRUPT);
 
-	perf_cpu_notifier(xtensa_pmu_notifier);
+	ret = cpuhp_setup_state(CPUHP_AP_PERF_XTENSA_STARTING,
+				"AP_PERF_XTENSA_STARTING", xtensa_pmu_setup,
+				NULL);
+	if (ret) {
+		pr_err("xtensa_pmu: failed to register CPU hotplug state.\n");
+		return ret;
+	}
 #if XTENSA_FAKE_NMI
 	enable_irq(irq);
 #else
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 0ca14ac..0553aee 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -118,12 +118,13 @@
 	struct acpi_device *device;
 	action &= ~CPU_TASKS_FROZEN;
 
-	/*
-	 * CPU_STARTING and CPU_DYING must not sleep. Return here since
-	 * acpi_bus_get_device() may sleep.
-	 */
-	if (action == CPU_STARTING || action == CPU_DYING)
+	switch (action) {
+	case CPU_ONLINE:
+	case CPU_DEAD:
+		break;
+	default:
 		return NOTIFY_DONE;
+	}
 
 	if (!pr || acpi_bus_get_device(pr->handle, &device))
 		return NOTIFY_DONE;
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
index a49b283..5755907f 100644
--- a/drivers/bus/arm-cci.c
+++ b/drivers/bus/arm-cci.c
@@ -144,12 +144,15 @@
 	int num_cntrs;
 	atomic_t active_events;
 	struct mutex reserve_mutex;
-	struct notifier_block cpu_nb;
+	struct list_head entry;
 	cpumask_t cpus;
 };
 
 #define to_cci_pmu(c)	(container_of(c, struct cci_pmu, pmu))
 
+static DEFINE_MUTEX(cci_pmu_mutex);
+static LIST_HEAD(cci_pmu_list);
+
 enum cci_models {
 #ifdef CONFIG_ARM_CCI400_PMU
 	CCI400_R0,
@@ -1503,31 +1506,26 @@
 	return perf_pmu_register(&cci_pmu->pmu, name, -1);
 }
 
-static int cci_pmu_cpu_notifier(struct notifier_block *self,
-				unsigned long action, void *hcpu)
+static int cci_pmu_offline_cpu(unsigned int cpu)
 {
-	struct cci_pmu *cci_pmu = container_of(self,
-					struct cci_pmu, cpu_nb);
-	unsigned int cpu = (long)hcpu;
+	struct cci_pmu *cci_pmu;
 	unsigned int target;
 
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_DOWN_PREPARE:
+	mutex_lock(&cci_pmu_mutex);
+	list_for_each_entry(cci_pmu, &cci_pmu_list, entry) {
 		if (!cpumask_test_and_clear_cpu(cpu, &cci_pmu->cpus))
-			break;
+			continue;
 		target = cpumask_any_but(cpu_online_mask, cpu);
-		if (target >= nr_cpu_ids) // UP, last CPU
-			break;
+		if (target >= nr_cpu_ids)
+			continue;
 		/*
 		 * TODO: migrate context once core races on event->ctx have
 		 * been fixed.
 		 */
 		cpumask_set_cpu(target, &cci_pmu->cpus);
-	default:
-		break;
 	}
-
-	return NOTIFY_OK;
+	mutex_unlock(&cci_pmu_mutex);
+	return 0;
 }
 
 static struct cci_pmu_model cci_pmu_models[] = {
@@ -1766,24 +1764,13 @@
 	atomic_set(&cci_pmu->active_events, 0);
 	cpumask_set_cpu(smp_processor_id(), &cci_pmu->cpus);
 
-	cci_pmu->cpu_nb = (struct notifier_block) {
-		.notifier_call	= cci_pmu_cpu_notifier,
-		/*
-		 * to migrate uncore events, our notifier should be executed
-		 * before perf core's notifier.
-		 */
-		.priority	= CPU_PRI_PERF + 1,
-	};
-
-	ret = register_cpu_notifier(&cci_pmu->cpu_nb);
+	ret = cci_pmu_init(cci_pmu, pdev);
 	if (ret)
 		return ret;
 
-	ret = cci_pmu_init(cci_pmu, pdev);
-	if (ret) {
-		unregister_cpu_notifier(&cci_pmu->cpu_nb);
-		return ret;
-	}
+	mutex_lock(&cci_pmu_mutex);
+	list_add(&cci_pmu->entry, &cci_pmu_list);
+	mutex_unlock(&cci_pmu_mutex);
 
 	pr_info("ARM %s PMU driver probed", cci_pmu->model->name);
 	return 0;
@@ -1817,6 +1804,12 @@
 {
 	int ret;
 
+	ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE,
+					"AP_PERF_ARM_CCI_ONLINE", NULL,
+					cci_pmu_offline_cpu);
+	if (ret)
+		return ret;
+
 	ret = platform_driver_register(&cci_pmu_driver);
 	if (ret)
 		return ret;
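
Because a cpuhp state is identified by a constant rather than by a notifier_block embedded in each device instance, per-instance notifiers such as the old cci_pmu->cpu_nb do not survive the conversion. The pattern used here, and repeated for arm-ccn and arm_pmu below, is a single globally registered callback that walks a mutex-protected list of instances. A reduced sketch with hypothetical names:

/* Sketch only; struct demo_pmu and the list/mutex names are placeholders. */
static DEFINE_MUTEX(demo_pmu_mutex);
static LIST_HEAD(demo_pmu_list);

struct demo_pmu {
	struct list_head entry;
	cpumask_t cpus;
};

static int demo_pmu_offline_cpu(unsigned int cpu)
{
	struct demo_pmu *p;
	unsigned int target;

	mutex_lock(&demo_pmu_mutex);
	list_for_each_entry(p, &demo_pmu_list, entry) {
		if (!cpumask_test_and_clear_cpu(cpu, &p->cpus))
			continue;
		target = cpumask_any_but(cpu_online_mask, cpu);
		if (target >= nr_cpu_ids)
			continue;
		/* hand this instance over to a surviving CPU */
		cpumask_set_cpu(target, &p->cpus);
	}
	mutex_unlock(&demo_pmu_mutex);
	return 0;
}

Probe then only appends the new instance to the list under the mutex and remove deletes it, so one callback registered at subsystem init covers every instance.
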
diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c
index acc3eb5..97a9185 100644
--- a/drivers/bus/arm-ccn.c
+++ b/drivers/bus/arm-ccn.c
@@ -167,7 +167,7 @@
 	struct hrtimer hrtimer;
 
 	cpumask_t cpu;
-	struct notifier_block cpu_nb;
+	struct list_head entry;
 
 	struct pmu pmu;
 };
@@ -189,6 +189,8 @@
 	struct arm_ccn_dt dt;
 };
 
+static DEFINE_MUTEX(arm_ccn_mutex);
+static LIST_HEAD(arm_ccn_list);
 
 static int arm_ccn_node_to_xp(int node)
 {
@@ -1171,30 +1173,27 @@
 }
 
 
-static int arm_ccn_pmu_cpu_notifier(struct notifier_block *nb,
-		unsigned long action, void *hcpu)
+static int arm_ccn_pmu_offline_cpu(unsigned int cpu)
 {
-	struct arm_ccn_dt *dt = container_of(nb, struct arm_ccn_dt, cpu_nb);
-	struct arm_ccn *ccn = container_of(dt, struct arm_ccn, dt);
-	unsigned int cpu = (long)hcpu; /* for (long) see kernel/cpu.c */
+	struct arm_ccn_dt *dt;
 	unsigned int target;
 
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_DOWN_PREPARE:
+	mutex_lock(&arm_ccn_mutex);
+	list_for_each_entry(dt, &arm_ccn_list, entry) {
+		struct arm_ccn *ccn = container_of(dt, struct arm_ccn, dt);
+
 		if (!cpumask_test_and_clear_cpu(cpu, &dt->cpu))
-			break;
+			continue;
 		target = cpumask_any_but(cpu_online_mask, cpu);
 		if (target >= nr_cpu_ids)
-			break;
+			continue;
 		perf_pmu_migrate_context(&dt->pmu, cpu, target);
 		cpumask_set_cpu(target, &dt->cpu);
 		if (ccn->irq)
 			WARN_ON(irq_set_affinity_hint(ccn->irq, &dt->cpu) != 0);
-	default:
-		break;
 	}
-
-	return NOTIFY_OK;
+	mutex_unlock(&arm_ccn_mutex);
+	return 0;
 }
 
 
@@ -1266,16 +1265,6 @@
 	/* Pick one CPU which we will use to collect data from CCN... */
 	cpumask_set_cpu(smp_processor_id(), &ccn->dt.cpu);
 
-	/*
-	 * ... and change the selection when it goes offline. Priority is
-	 * picked to have a chance to migrate events before perf is notified.
-	 */
-	ccn->dt.cpu_nb.notifier_call = arm_ccn_pmu_cpu_notifier;
-	ccn->dt.cpu_nb.priority = CPU_PRI_PERF + 1,
-	err = register_cpu_notifier(&ccn->dt.cpu_nb);
-	if (err)
-		goto error_cpu_notifier;
-
 	/* Also make sure that the overflow interrupt is handled by this CPU */
 	if (ccn->irq) {
 		err = irq_set_affinity_hint(ccn->irq, &ccn->dt.cpu);
@@ -1289,12 +1278,13 @@
 	if (err)
 		goto error_pmu_register;
 
+	mutex_lock(&arm_ccn_mutex);
+	list_add(&ccn->dt.entry, &arm_ccn_list);
+	mutex_unlock(&arm_ccn_mutex);
 	return 0;
 
 error_pmu_register:
 error_set_affinity:
-	unregister_cpu_notifier(&ccn->dt.cpu_nb);
-error_cpu_notifier:
 	ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id);
 	for (i = 0; i < ccn->num_xps; i++)
 		writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL);
@@ -1306,9 +1296,12 @@
 {
 	int i;
 
+	mutex_lock(&arm_ccn_mutex);
+	list_del(&ccn->dt.entry);
+	mutex_unlock(&arm_ccn_mutex);
+
 	if (ccn->irq)
 		irq_set_affinity_hint(ccn->irq, NULL);
-	unregister_cpu_notifier(&ccn->dt.cpu_nb);
 	for (i = 0; i < ccn->num_xps; i++)
 		writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL);
 	writel(0, ccn->dt.base + CCN_DT_PMCR);
@@ -1316,7 +1309,6 @@
 	ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id);
 }
 
-
 static int arm_ccn_for_each_valid_region(struct arm_ccn *ccn,
 		int (*callback)(struct arm_ccn *ccn, int region,
 		void __iomem *base, u32 type, u32 id))
@@ -1533,7 +1525,13 @@
 
 static int __init arm_ccn_init(void)
 {
-	int i;
+	int i, ret;
+
+	ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
+					"AP_PERF_ARM_CCN_ONLINE", NULL,
+					arm_ccn_pmu_offline_cpu);
+	if (ret)
+		return ret;
 
 	for (i = 0; i < ARRAY_SIZE(arm_ccn_pmu_events); i++)
 		arm_ccn_pmu_events_attrs[i] = &arm_ccn_pmu_events[i].attr.attr;
@@ -1543,6 +1541,7 @@
 
 static void __exit arm_ccn_exit(void)
 {
+	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE);
 	platform_driver_unregister(&arm_ccn_driver);
 }
 
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 5effd30..28bce3f 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -370,8 +370,10 @@
 		arch_timer_ppi[PHYS_NONSECURE_PPI]);
 }
 
-static int arch_timer_setup(struct clock_event_device *clk)
+static int arch_timer_starting_cpu(unsigned int cpu)
 {
+	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
+
 	__arch_timer_setup(ARCH_CP15_TIMER, clk);
 
 	enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], 0);
@@ -527,29 +529,14 @@
 	clk->set_state_shutdown(clk);
 }
 
-static int arch_timer_cpu_notify(struct notifier_block *self,
-					   unsigned long action, void *hcpu)
+static int arch_timer_dying_cpu(unsigned int cpu)
 {
-	/*
-	 * Grab cpu pointer in each case to avoid spurious
-	 * preemptible warnings
-	 */
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_STARTING:
-		arch_timer_setup(this_cpu_ptr(arch_timer_evt));
-		break;
-	case CPU_DYING:
-		arch_timer_stop(this_cpu_ptr(arch_timer_evt));
-		break;
-	}
+	struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt);
 
-	return NOTIFY_OK;
+	arch_timer_stop(clk);
+	return 0;
 }
 
-static struct notifier_block arch_timer_cpu_nb = {
-	.notifier_call = arch_timer_cpu_notify,
-};
-
 #ifdef CONFIG_CPU_PM
 static unsigned int saved_cntkctl;
 static int arch_timer_cpu_pm_notify(struct notifier_block *self,
@@ -570,11 +557,21 @@
 {
 	return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier);
 }
+
+static void __init arch_timer_cpu_pm_deinit(void)
+{
+	WARN_ON(cpu_pm_unregister_notifier(&arch_timer_cpu_pm_notifier));
+}
+
 #else
 static int __init arch_timer_cpu_pm_init(void)
 {
 	return 0;
 }
+
+static void __init arch_timer_cpu_pm_deinit(void)
+{
+}
 #endif
 
 static int __init arch_timer_register(void)
@@ -621,22 +618,23 @@
 		goto out_free;
 	}
 
-	err = register_cpu_notifier(&arch_timer_cpu_nb);
-	if (err)
-		goto out_free_irq;
-
 	err = arch_timer_cpu_pm_init();
 	if (err)
 		goto out_unreg_notify;
 
-	/* Immediately configure the timer on the boot CPU */
-	arch_timer_setup(this_cpu_ptr(arch_timer_evt));
 
+	/* Register and immediately configure the timer on the boot CPU */
+	err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING,
+				"AP_ARM_ARCH_TIMER_STARTING",
+				arch_timer_starting_cpu, arch_timer_dying_cpu);
+	if (err)
+		goto out_unreg_cpupm;
 	return 0;
 
+out_unreg_cpupm:
+	arch_timer_cpu_pm_deinit();
+
 out_unreg_notify:
-	unregister_cpu_notifier(&arch_timer_cpu_nb);
-out_free_irq:
 	free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt);
 	if (arch_timer_has_nonsecure_ppi())
 		free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c
index 2a9ceb6..8da0329 100644
--- a/drivers/clocksource/arm_global_timer.c
+++ b/drivers/clocksource/arm_global_timer.c
@@ -165,9 +165,9 @@
 	return IRQ_HANDLED;
 }
 
-static int gt_clockevents_init(struct clock_event_device *clk)
+static int gt_starting_cpu(unsigned int cpu)
 {
-	int cpu = smp_processor_id();
+	struct clock_event_device *clk = this_cpu_ptr(gt_evt);
 
 	clk->name = "arm_global_timer";
 	clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
@@ -186,10 +186,13 @@
 	return 0;
 }
 
-static void gt_clockevents_stop(struct clock_event_device *clk)
+static int gt_dying_cpu(unsigned int cpu)
 {
+	struct clock_event_device *clk = this_cpu_ptr(gt_evt);
+
 	gt_clockevent_shutdown(clk);
 	disable_percpu_irq(clk->irq);
+	return 0;
 }
 
 static cycle_t gt_clocksource_read(struct clocksource *cs)
@@ -252,24 +255,6 @@
 	return clocksource_register_hz(&gt_clocksource, gt_clk_rate);
 }
 
-static int gt_cpu_notify(struct notifier_block *self, unsigned long action,
-			 void *hcpu)
-{
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_STARTING:
-		gt_clockevents_init(this_cpu_ptr(gt_evt));
-		break;
-	case CPU_DYING:
-		gt_clockevents_stop(this_cpu_ptr(gt_evt));
-		break;
-	}
-
-	return NOTIFY_OK;
-}
-static struct notifier_block gt_cpu_nb = {
-	.notifier_call = gt_cpu_notify,
-};
-
 static int __init global_timer_of_register(struct device_node *np)
 {
 	struct clk *gt_clk;
@@ -325,18 +310,14 @@
 		goto out_free;
 	}
 
-	err = register_cpu_notifier(&gt_cpu_nb);
-	if (err) {
-		pr_warn("global-timer: unable to register cpu notifier.\n");
-		goto out_irq;
-	}
-
-	/* Immediately configure the timer on the boot CPU */
+	/* Register and immediately configure the timer on the boot CPU */
 	err = gt_clocksource_init();
 	if (err)
 		goto out_irq;
 	
-	err = gt_clockevents_init(this_cpu_ptr(gt_evt));
+	err = cpuhp_setup_state(CPUHP_AP_ARM_GLOBAL_TIMER_STARTING,
+				"AP_ARM_GLOBAL_TIMER_STARTING",
+				gt_starting_cpu, gt_dying_cpu);
 	if (err)
 		goto out_irq;
 
diff --git a/drivers/clocksource/dummy_timer.c b/drivers/clocksource/dummy_timer.c
index 776b6c8..89f1c2e 100644
--- a/drivers/clocksource/dummy_timer.c
+++ b/drivers/clocksource/dummy_timer.c
@@ -16,10 +16,9 @@
 
 static DEFINE_PER_CPU(struct clock_event_device, dummy_timer_evt);
 
-static void dummy_timer_setup(void)
+static int dummy_timer_starting_cpu(unsigned int cpu)
 {
-	int cpu = smp_processor_id();
-	struct clock_event_device *evt = raw_cpu_ptr(&dummy_timer_evt);
+	struct clock_event_device *evt = per_cpu_ptr(&dummy_timer_evt, cpu);
 
 	evt->name	= "dummy_timer";
 	evt->features	= CLOCK_EVT_FEAT_PERIODIC |
@@ -29,36 +28,13 @@
 	evt->cpumask	= cpumask_of(cpu);
 
 	clockevents_register_device(evt);
+	return 0;
 }
 
-static int dummy_timer_cpu_notify(struct notifier_block *self,
-				      unsigned long action, void *hcpu)
-{
-	if ((action & ~CPU_TASKS_FROZEN) == CPU_STARTING)
-		dummy_timer_setup();
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block dummy_timer_cpu_nb = {
-	.notifier_call = dummy_timer_cpu_notify,
-};
-
 static int __init dummy_timer_register(void)
 {
-	int err = 0;
-
-	cpu_notifier_register_begin();
-	err = __register_cpu_notifier(&dummy_timer_cpu_nb);
-	if (err)
-		goto out;
-
-	/* We won't get a call on the boot CPU, so register immediately */
-	if (num_possible_cpus() > 1)
-		dummy_timer_setup();
-
-out:
-	cpu_notifier_register_done();
-	return err;
+	return cpuhp_setup_state(CPUHP_AP_DUMMY_TIMER_STARTING,
+				 "AP_DUMMY_TIMER_STARTING",
+				 dummy_timer_starting_cpu, NULL);
 }
 early_initcall(dummy_timer_register);
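
The dummy timer also shows why the explicit "configure the boot CPU immediately" calls vanish throughout this series: cpuhp_setup_state() (without the _nocalls suffix) invokes the startup callback for every CPU that is already online before it returns, and the callback's plain int return value replaces the NOTIFY_* codes, with 0 meaning success and a negative errno propagating an error. A small sketch of that error convention, with a made-up helper:

/* Sketch only; demo_hw_present() is a hypothetical helper. */
static bool demo_hw_present(void) { return true; }

static int demo_starting_cpu(unsigned int cpu)
{
	/*
	 * Returning a negative errno here makes the registration (or a
	 * later CPU bring-up) fail instead of returning NOTIFY_BAD.
	 */
	if (!demo_hw_present())
		return -ENODEV;
	return 0;
}
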
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index 0d18dd4b..41840d0 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -443,10 +443,11 @@
 	return IRQ_HANDLED;
 }
 
-static int exynos4_local_timer_setup(struct mct_clock_event_device *mevt)
+static int exynos4_mct_starting_cpu(unsigned int cpu)
 {
+	struct mct_clock_event_device *mevt =
+		per_cpu_ptr(&percpu_mct_tick, cpu);
 	struct clock_event_device *evt = &mevt->evt;
-	unsigned int cpu = smp_processor_id();
 
 	mevt->base = EXYNOS4_MCT_L_BASE(cpu);
 	snprintf(mevt->name, sizeof(mevt->name), "mct_tick%d", cpu);
@@ -480,8 +481,10 @@
 	return 0;
 }
 
-static void exynos4_local_timer_stop(struct mct_clock_event_device *mevt)
+static int exynos4_mct_dying_cpu(unsigned int cpu)
 {
+	struct mct_clock_event_device *mevt =
+		per_cpu_ptr(&percpu_mct_tick, cpu);
 	struct clock_event_device *evt = &mevt->evt;
 
 	evt->set_state_shutdown(evt);
@@ -491,39 +494,12 @@
 	} else {
 		disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
 	}
+	return 0;
 }
 
-static int exynos4_mct_cpu_notify(struct notifier_block *self,
-					   unsigned long action, void *hcpu)
-{
-	struct mct_clock_event_device *mevt;
-
-	/*
-	 * Grab cpu pointer in each case to avoid spurious
-	 * preemptible warnings
-	 */
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_STARTING:
-		mevt = this_cpu_ptr(&percpu_mct_tick);
-		exynos4_local_timer_setup(mevt);
-		break;
-	case CPU_DYING:
-		mevt = this_cpu_ptr(&percpu_mct_tick);
-		exynos4_local_timer_stop(mevt);
-		break;
-	}
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block exynos4_mct_cpu_nb = {
-	.notifier_call = exynos4_mct_cpu_notify,
-};
-
 static int __init exynos4_timer_resources(struct device_node *np, void __iomem *base)
 {
 	int err, cpu;
-	struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);
 	struct clk *mct_clk, *tick_clk;
 
 	tick_clk = np ? of_clk_get_by_name(np, "fin_pll") :
@@ -570,12 +546,14 @@
 		}
 	}
 
-	err = register_cpu_notifier(&exynos4_mct_cpu_nb);
+	/* Install hotplug callbacks which configure the timer on this CPU */
+	err = cpuhp_setup_state(CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
+				"AP_EXYNOS4_MCT_TIMER_STARTING",
+				exynos4_mct_starting_cpu,
+				exynos4_mct_dying_cpu);
 	if (err)
 		goto out_irq;
 
-	/* Immediately configure the timer on the boot CPU */
-	exynos4_local_timer_setup(mevt);
 	return 0;
 
 out_irq:
diff --git a/drivers/clocksource/metag_generic.c b/drivers/clocksource/metag_generic.c
index bcd5c0d..a80ab3e 100644
--- a/drivers/clocksource/metag_generic.c
+++ b/drivers/clocksource/metag_generic.c
@@ -90,7 +90,7 @@
 	return ticks << HARDWARE_TO_NS_SHIFT;
 }
 
-static void arch_timer_setup(unsigned int cpu)
+static int arch_timer_starting_cpu(unsigned int cpu)
 {
 	unsigned int txdivtime;
 	struct clock_event_device *clk = &per_cpu(local_clockevent, cpu);
@@ -132,27 +132,9 @@
 		val = core_reg_read(TXUCT_ID, TXTIMER_REGNUM, thread0);
 		__core_reg_set(TXTIMER, val);
 	}
+	return 0;
 }
 
-static int arch_timer_cpu_notify(struct notifier_block *self,
-					   unsigned long action, void *hcpu)
-{
-	int cpu = (long)hcpu;
-
-	switch (action) {
-	case CPU_STARTING:
-	case CPU_STARTING_FROZEN:
-		arch_timer_setup(cpu);
-		break;
-	}
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block arch_timer_cpu_nb = {
-	.notifier_call = arch_timer_cpu_notify,
-};
-
 int __init metag_generic_timer_init(void)
 {
 	/*
@@ -170,11 +152,8 @@
 
 	setup_irq(tbisig_map(TBID_SIGNUM_TRT), &metag_timer_irq);
 
-	/* Configure timer on boot CPU */
-	arch_timer_setup(smp_processor_id());
-
-	/* Hook cpu boot to configure other CPU's timers */
-	register_cpu_notifier(&arch_timer_cpu_nb);
-
-	return 0;
+	/* Hook cpu boot to configure the CPU's timers */
+	return cpuhp_setup_state(CPUHP_AP_METAG_TIMER_STARTING,
+				 "AP_METAG_TIMER_STARTING",
+				 arch_timer_starting_cpu, NULL);
 }
diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c
index 1572c7a..d91e872 100644
--- a/drivers/clocksource/mips-gic-timer.c
+++ b/drivers/clocksource/mips-gic-timer.c
@@ -49,10 +49,9 @@
 	.name = "timer",
 };
 
-static void gic_clockevent_cpu_init(struct clock_event_device *cd)
+static void gic_clockevent_cpu_init(unsigned int cpu,
+				    struct clock_event_device *cd)
 {
-	unsigned int cpu = smp_processor_id();
-
 	cd->name		= "MIPS GIC";
 	cd->features		= CLOCK_EVT_FEAT_ONESHOT |
 				  CLOCK_EVT_FEAT_C3STOP;
@@ -79,19 +78,10 @@
 	clockevents_update_freq(this_cpu_ptr(&gic_clockevent_device), rate);
 }
 
-static int gic_cpu_notifier(struct notifier_block *nb, unsigned long action,
-				void *data)
+static int gic_starting_cpu(unsigned int cpu)
 {
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_STARTING:
-		gic_clockevent_cpu_init(this_cpu_ptr(&gic_clockevent_device));
-		break;
-	case CPU_DYING:
-		gic_clockevent_cpu_exit(this_cpu_ptr(&gic_clockevent_device));
-		break;
-	}
-
-	return NOTIFY_OK;
+	gic_clockevent_cpu_init(cpu, this_cpu_ptr(&gic_clockevent_device));
+	return 0;
 }
 
 static int gic_clk_notifier(struct notifier_block *nb, unsigned long action,
@@ -105,10 +95,11 @@
 	return NOTIFY_OK;
 }
 
-
-static struct notifier_block gic_cpu_nb = {
-	.notifier_call = gic_cpu_notifier,
-};
+static int gic_dying_cpu(unsigned int cpu)
+{
+	gic_clockevent_cpu_exit(this_cpu_ptr(&gic_clockevent_device));
+	return 0;
+}
 
 static struct notifier_block gic_clk_nb = {
 	.notifier_call = gic_clk_notifier,
@@ -125,12 +116,9 @@
 	if (ret < 0)
 		return ret;
 
-	ret = register_cpu_notifier(&gic_cpu_nb);
-	if (ret < 0)
-		pr_warn("GIC: Unable to register CPU notifier\n");
-
-	gic_clockevent_cpu_init(this_cpu_ptr(&gic_clockevent_device));
-
+	cpuhp_setup_state(CPUHP_AP_MIPS_GIC_TIMER_STARTING,
+			  "AP_MIPS_GIC_TIMER_STARTING", gic_starting_cpu,
+			  gic_dying_cpu);
 	return 0;
 }
 
diff --git a/drivers/clocksource/qcom-timer.c b/drivers/clocksource/qcom-timer.c
index 6625763..3283cfa 100644
--- a/drivers/clocksource/qcom-timer.c
+++ b/drivers/clocksource/qcom-timer.c
@@ -105,9 +105,9 @@
 static int msm_timer_irq;
 static int msm_timer_has_ppi;
 
-static int msm_local_timer_setup(struct clock_event_device *evt)
+static int msm_local_timer_starting_cpu(unsigned int cpu)
 {
-	int cpu = smp_processor_id();
+	struct clock_event_device *evt = per_cpu_ptr(msm_evt, cpu);
 	int err;
 
 	evt->irq = msm_timer_irq;
@@ -135,35 +135,15 @@
 	return 0;
 }
 
-static void msm_local_timer_stop(struct clock_event_device *evt)
+static int msm_local_timer_dying_cpu(unsigned int cpu)
 {
+	struct clock_event_device *evt = per_cpu_ptr(msm_evt, cpu);
+
 	evt->set_state_shutdown(evt);
 	disable_percpu_irq(evt->irq);
+	return 0;
 }
 
-static int msm_timer_cpu_notify(struct notifier_block *self,
-					   unsigned long action, void *hcpu)
-{
-	/*
-	 * Grab cpu pointer in each case to avoid spurious
-	 * preemptible warnings
-	 */
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_STARTING:
-		msm_local_timer_setup(this_cpu_ptr(msm_evt));
-		break;
-	case CPU_DYING:
-		msm_local_timer_stop(this_cpu_ptr(msm_evt));
-		break;
-	}
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block msm_timer_cpu_nb = {
-	.notifier_call = msm_timer_cpu_notify,
-};
-
 static u64 notrace msm_sched_clock_read(void)
 {
 	return msm_clocksource.read(&msm_clocksource);
@@ -200,14 +180,15 @@
 	if (res) {
 		pr_err("request_percpu_irq failed\n");
 	} else {
-		res = register_cpu_notifier(&msm_timer_cpu_nb);
+		/* Install and invoke hotplug callbacks */
+		res = cpuhp_setup_state(CPUHP_AP_QCOM_TIMER_STARTING,
+					"AP_QCOM_TIMER_STARTING",
+					msm_local_timer_starting_cpu,
+					msm_local_timer_dying_cpu);
 		if (res) {
 			free_percpu_irq(irq, msm_evt);
 			goto err;
 		}
-
-		/* Immediately configure the timer on the boot CPU */
-		msm_local_timer_setup(raw_cpu_ptr(msm_evt));
 	}
 
 err:
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c
index 20ec066..719b478 100644
--- a/drivers/clocksource/time-armada-370-xp.c
+++ b/drivers/clocksource/time-armada-370-xp.c
@@ -170,10 +170,10 @@
 /*
  * Setup the local clock events for a CPU.
  */
-static int armada_370_xp_timer_setup(struct clock_event_device *evt)
+static int armada_370_xp_timer_starting_cpu(unsigned int cpu)
 {
+	struct clock_event_device *evt = per_cpu_ptr(armada_370_xp_evt, cpu);
 	u32 clr = 0, set = 0;
-	int cpu = smp_processor_id();
 
 	if (timer25Mhz)
 		set = TIMER0_25MHZ;
@@ -200,35 +200,15 @@
 	return 0;
 }
 
-static void armada_370_xp_timer_stop(struct clock_event_device *evt)
+static int armada_370_xp_timer_dying_cpu(unsigned int cpu)
 {
+	struct clock_event_device *evt = per_cpu_ptr(armada_370_xp_evt, cpu);
+
 	evt->set_state_shutdown(evt);
 	disable_percpu_irq(evt->irq);
+	return 0;
 }
 
-static int armada_370_xp_timer_cpu_notify(struct notifier_block *self,
-					   unsigned long action, void *hcpu)
-{
-	/*
-	 * Grab cpu pointer in each case to avoid spurious
-	 * preemptible warnings
-	 */
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_STARTING:
-		armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt));
-		break;
-	case CPU_DYING:
-		armada_370_xp_timer_stop(this_cpu_ptr(armada_370_xp_evt));
-		break;
-	}
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block armada_370_xp_timer_cpu_nb = {
-	.notifier_call = armada_370_xp_timer_cpu_notify,
-};
-
 static u32 timer0_ctrl_reg, timer0_local_ctrl_reg;
 
 static int armada_370_xp_timer_suspend(void)
@@ -322,8 +302,6 @@
 		return res;
 	}
 
-	register_cpu_notifier(&armada_370_xp_timer_cpu_nb);
-
 	armada_370_xp_evt = alloc_percpu(struct clock_event_device);
 	if (!armada_370_xp_evt)
 		return -ENOMEM;
@@ -341,9 +319,12 @@
 		return res;
 	}
 
-	res = armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt));
+	res = cpuhp_setup_state(CPUHP_AP_ARMADA_TIMER_STARTING,
+				"AP_ARMADA_TIMER_STARTING",
+				armada_370_xp_timer_starting_cpu,
+				armada_370_xp_timer_dying_cpu);
 	if (res) {
-		pr_err("Failed to setup timer");
+		pr_err("Failed to setup hotplug state and timer");
 		return res;
 	}
 
diff --git a/drivers/clocksource/timer-atlas7.c b/drivers/clocksource/timer-atlas7.c
index 90f8fbc..4334e03 100644
--- a/drivers/clocksource/timer-atlas7.c
+++ b/drivers/clocksource/timer-atlas7.c
@@ -172,9 +172,9 @@
 	.handler = sirfsoc_timer_interrupt,
 };
 
-static int sirfsoc_local_timer_setup(struct clock_event_device *ce)
+static int sirfsoc_local_timer_starting_cpu(unsigned int cpu)
 {
-	int cpu = smp_processor_id();
+	struct clock_event_device *ce = per_cpu_ptr(sirfsoc_clockevent, cpu);
 	struct irqaction *action;
 
 	if (cpu == 0)
@@ -203,50 +203,27 @@
 	return 0;
 }
 
-static void sirfsoc_local_timer_stop(struct clock_event_device *ce)
+static int sirfsoc_local_timer_dying_cpu(unsigned int cpu)
 {
-	int cpu = smp_processor_id();
-
 	sirfsoc_timer_count_disable(1);
 
 	if (cpu == 0)
 		remove_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq);
 	else
 		remove_irq(sirfsoc_timer1_irq.irq, &sirfsoc_timer1_irq);
+	return 0;
 }
 
-static int sirfsoc_cpu_notify(struct notifier_block *self,
-			      unsigned long action, void *hcpu)
-{
-	/*
-	 * Grab cpu pointer in each case to avoid spurious
-	 * preemptible warnings
-	 */
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_STARTING:
-		sirfsoc_local_timer_setup(this_cpu_ptr(sirfsoc_clockevent));
-		break;
-	case CPU_DYING:
-		sirfsoc_local_timer_stop(this_cpu_ptr(sirfsoc_clockevent));
-		break;
-	}
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block sirfsoc_cpu_nb = {
-	.notifier_call = sirfsoc_cpu_notify,
-};
-
 static int __init sirfsoc_clockevent_init(void)
 {
 	sirfsoc_clockevent = alloc_percpu(struct clock_event_device);
 	BUG_ON(!sirfsoc_clockevent);
 
-	BUG_ON(register_cpu_notifier(&sirfsoc_cpu_nb));
-
-	/* Immediately configure the timer on the boot CPU */
-	return sirfsoc_local_timer_setup(this_cpu_ptr(sirfsoc_clockevent));
+	/* Install and invoke hotplug callbacks */
+	return cpuhp_setup_state(CPUHP_AP_MARCO_TIMER_STARTING,
+				 "AP_MARCO_TIMER_STARTING",
+				 sirfsoc_local_timer_starting_cpu,
+				 sirfsoc_local_timer_dying_cpu);
 }
 
 /* initialize the kernel jiffy timer source */
diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c
index d83ab82..2de4cad 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x.c
@@ -51,6 +51,8 @@
 static int etm_count;
 static struct etm_drvdata *etmdrvdata[NR_CPUS];
 
+static enum cpuhp_state hp_online;
+
 /*
  * Memory mapped writes to clear os lock are not supported on some processors
  * and OS lock must be unlocked before any memory mapped access on such
@@ -481,8 +483,7 @@
 
 	/*
 	 * Configure the ETM only if the CPU is online.  If it isn't online
-	 * hw configuration will take place when 'CPU_STARTING' is received
-	 * in @etm_cpu_callback.
+	 * hw configuration will take place on the local CPU during bring up.
 	 */
 	if (cpu_online(drvdata->cpu)) {
 		ret = smp_call_function_single(drvdata->cpu,
@@ -641,47 +642,44 @@
 	.source_ops	= &etm_source_ops,
 };
 
-static int etm_cpu_callback(struct notifier_block *nfb, unsigned long action,
-			    void *hcpu)
+static int etm_online_cpu(unsigned int cpu)
 {
-	unsigned int cpu = (unsigned long)hcpu;
-
 	if (!etmdrvdata[cpu])
-		goto out;
+		return 0;
 
-	switch (action & (~CPU_TASKS_FROZEN)) {
-	case CPU_STARTING:
-		spin_lock(&etmdrvdata[cpu]->spinlock);
-		if (!etmdrvdata[cpu]->os_unlock) {
-			etm_os_unlock(etmdrvdata[cpu]);
-			etmdrvdata[cpu]->os_unlock = true;
-		}
-
-		if (local_read(&etmdrvdata[cpu]->mode))
-			etm_enable_hw(etmdrvdata[cpu]);
-		spin_unlock(&etmdrvdata[cpu]->spinlock);
-		break;
-
-	case CPU_ONLINE:
-		if (etmdrvdata[cpu]->boot_enable &&
-		    !etmdrvdata[cpu]->sticky_enable)
-			coresight_enable(etmdrvdata[cpu]->csdev);
-		break;
-
-	case CPU_DYING:
-		spin_lock(&etmdrvdata[cpu]->spinlock);
-		if (local_read(&etmdrvdata[cpu]->mode))
-			etm_disable_hw(etmdrvdata[cpu]);
-		spin_unlock(&etmdrvdata[cpu]->spinlock);
-		break;
-	}
-out:
-	return NOTIFY_OK;
+	if (etmdrvdata[cpu]->boot_enable && !etmdrvdata[cpu]->sticky_enable)
+		coresight_enable(etmdrvdata[cpu]->csdev);
+	return 0;
 }
 
-static struct notifier_block etm_cpu_notifier = {
-	.notifier_call = etm_cpu_callback,
-};
+static int etm_starting_cpu(unsigned int cpu)
+{
+	if (!etmdrvdata[cpu])
+		return 0;
+
+	spin_lock(&etmdrvdata[cpu]->spinlock);
+	if (!etmdrvdata[cpu]->os_unlock) {
+		etm_os_unlock(etmdrvdata[cpu]);
+		etmdrvdata[cpu]->os_unlock = true;
+	}
+
+	if (local_read(&etmdrvdata[cpu]->mode))
+		etm_enable_hw(etmdrvdata[cpu]);
+	spin_unlock(&etmdrvdata[cpu]->spinlock);
+	return 0;
+}
+
+static int etm_dying_cpu(unsigned int cpu)
+{
+	if (!etmdrvdata[cpu])
+		return 0;
+
+	spin_lock(&etmdrvdata[cpu]->spinlock);
+	if (local_read(&etmdrvdata[cpu]->mode))
+		etm_disable_hw(etmdrvdata[cpu]);
+	spin_unlock(&etmdrvdata[cpu]->spinlock);
+	return 0;
+}
 
 static bool etm_arch_supported(u8 arch)
 {
@@ -806,9 +804,17 @@
 				     etm_init_arch_data,  drvdata, 1))
 		dev_err(dev, "ETM arch init failed\n");
 
-	if (!etm_count++)
-		register_hotcpu_notifier(&etm_cpu_notifier);
-
+	if (!etm_count++) {
+		cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING,
+					  "AP_ARM_CORESIGHT_STARTING",
+					  etm_starting_cpu, etm_dying_cpu);
+		ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+						"AP_ARM_CORESIGHT_ONLINE",
+						etm_online_cpu, NULL);
+		if (ret < 0)
+			goto err_arch_supported;
+		hp_online = ret;
+	}
 	put_online_cpus();
 
 	if (etm_arch_supported(drvdata->arch) == false) {
@@ -839,7 +845,6 @@
 
 	pm_runtime_put(&adev->dev);
 	dev_info(dev, "%s initialized\n", (char *)id->data);
-
 	if (boot_enable) {
 		coresight_enable(drvdata->csdev);
 		drvdata->boot_enable = true;
@@ -848,8 +853,11 @@
 	return 0;
 
 err_arch_supported:
-	if (--etm_count == 0)
-		unregister_hotcpu_notifier(&etm_cpu_notifier);
+	if (--etm_count == 0) {
+		cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING);
+		if (hp_online)
+			cpuhp_remove_state_nocalls(hp_online);
+	}
 	return ret;
 }
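
The coresight drivers (this one and the etm4x conversion that follows) need two hooks, but only the STARTING one has a fixed slot; the ONLINE-level hook is registered on the dynamic CPUHP_AP_ONLINE_DYN range instead, and the allocated state number is remembered (hp_online above) so it can be removed again. A reduced sketch of that dynamic registration, with hypothetical names:

/* Sketch only; demo_online_cpu() and demo_hp_online are placeholders. */
static enum cpuhp_state demo_hp_online;

static int demo_online_cpu(unsigned int cpu) { return 0; }

static int demo_register(void)
{
	int ret;

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "DEMO_ONLINE",
					demo_online_cpu, NULL);
	if (ret < 0)
		return ret;
	demo_hp_online = ret;	/* the slot that was actually allocated */
	return 0;
}

static void demo_unregister(void)
{
	if (demo_hp_online)
		cpuhp_remove_state_nocalls(demo_hp_online);
}
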
 
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index 462f0dc..1a5e0d1 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -48,6 +48,8 @@
 static struct etmv4_drvdata *etmdrvdata[NR_CPUS];
 static void etm4_set_default(struct etmv4_config *config);
 
+static enum cpuhp_state hp_online;
+
 static void etm4_os_unlock(struct etmv4_drvdata *drvdata)
 {
 	/* Writing any value to ETMOSLAR unlocks the trace registers */
@@ -673,47 +675,44 @@
 	config->addr_acc[ETM_DEFAULT_ADDR_COMP + 1] = addr_acc;
 }
 
-static int etm4_cpu_callback(struct notifier_block *nfb, unsigned long action,
-			    void *hcpu)
+static int etm4_online_cpu(unsigned int cpu)
 {
-	unsigned int cpu = (unsigned long)hcpu;
-
 	if (!etmdrvdata[cpu])
-		goto out;
+		return 0;
 
-	switch (action & (~CPU_TASKS_FROZEN)) {
-	case CPU_STARTING:
-		spin_lock(&etmdrvdata[cpu]->spinlock);
-		if (!etmdrvdata[cpu]->os_unlock) {
-			etm4_os_unlock(etmdrvdata[cpu]);
-			etmdrvdata[cpu]->os_unlock = true;
-		}
-
-		if (local_read(&etmdrvdata[cpu]->mode))
-			etm4_enable_hw(etmdrvdata[cpu]);
-		spin_unlock(&etmdrvdata[cpu]->spinlock);
-		break;
-
-	case CPU_ONLINE:
-		if (etmdrvdata[cpu]->boot_enable &&
-			!etmdrvdata[cpu]->sticky_enable)
-			coresight_enable(etmdrvdata[cpu]->csdev);
-		break;
-
-	case CPU_DYING:
-		spin_lock(&etmdrvdata[cpu]->spinlock);
-		if (local_read(&etmdrvdata[cpu]->mode))
-			etm4_disable_hw(etmdrvdata[cpu]);
-		spin_unlock(&etmdrvdata[cpu]->spinlock);
-		break;
-	}
-out:
-	return NOTIFY_OK;
+	if (etmdrvdata[cpu]->boot_enable && !etmdrvdata[cpu]->sticky_enable)
+		coresight_enable(etmdrvdata[cpu]->csdev);
+	return 0;
 }
 
-static struct notifier_block etm4_cpu_notifier = {
-	.notifier_call = etm4_cpu_callback,
-};
+static int etm4_starting_cpu(unsigned int cpu)
+{
+	if (!etmdrvdata[cpu])
+		return 0;
+
+	spin_lock(&etmdrvdata[cpu]->spinlock);
+	if (!etmdrvdata[cpu]->os_unlock) {
+		etm4_os_unlock(etmdrvdata[cpu]);
+		etmdrvdata[cpu]->os_unlock = true;
+	}
+
+	if (local_read(&etmdrvdata[cpu]->mode))
+		etm4_enable_hw(etmdrvdata[cpu]);
+	spin_unlock(&etmdrvdata[cpu]->spinlock);
+	return 0;
+}
+
+static int etm4_dying_cpu(unsigned int cpu)
+{
+	if (!etmdrvdata[cpu])
+		return 0;
+
+	spin_lock(&etmdrvdata[cpu]->spinlock);
+	if (local_read(&etmdrvdata[cpu]->mode))
+		etm4_disable_hw(etmdrvdata[cpu]);
+	spin_unlock(&etmdrvdata[cpu]->spinlock);
+	return 0;
+}
 
 static void etm4_init_trace_id(struct etmv4_drvdata *drvdata)
 {
@@ -767,8 +766,17 @@
 				etm4_init_arch_data,  drvdata, 1))
 		dev_err(dev, "ETM arch init failed\n");
 
-	if (!etm4_count++)
-		register_hotcpu_notifier(&etm4_cpu_notifier);
+	if (!etm4_count++) {
+		cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CORESIGHT4_STARTING,
+					  "AP_ARM_CORESIGHT4_STARTING",
+					  etm4_starting_cpu, etm4_dying_cpu);
+		ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+						"AP_ARM_CORESIGHT4_ONLINE",
+						etm4_online_cpu, NULL);
+		if (ret < 0)
+			goto err_arch_supported;
+		hp_online = ret;
+	}
 
 	put_online_cpus();
 
@@ -809,8 +817,11 @@
 	return 0;
 
 err_arch_supported:
-	if (--etm4_count == 0)
-		unregister_hotcpu_notifier(&etm4_cpu_notifier);
+	if (--etm4_count == 0) {
+		cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT4_STARTING);
+		if (hp_online)
+			cpuhp_remove_state_nocalls(hp_online);
+	}
 	return ret;
 }
 
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index 7c42b1d..8bcee65 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -345,38 +345,20 @@
 		ARMADA_370_XP_SW_TRIG_INT_OFFS);
 }
 
-static int armada_xp_mpic_secondary_init(struct notifier_block *nfb,
-					 unsigned long action, void *hcpu)
+static int armada_xp_mpic_starting_cpu(unsigned int cpu)
 {
-	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) {
-		armada_xp_mpic_perf_init();
-		armada_xp_mpic_smp_cpu_init();
-	}
-
-	return NOTIFY_OK;
+	armada_xp_mpic_perf_init();
+	armada_xp_mpic_smp_cpu_init();
+	return 0;
 }
 
-static struct notifier_block armada_370_xp_mpic_cpu_notifier = {
-	.notifier_call = armada_xp_mpic_secondary_init,
-	.priority = 100,
-};
-
-static int mpic_cascaded_secondary_init(struct notifier_block *nfb,
-					unsigned long action, void *hcpu)
+static int mpic_cascaded_starting_cpu(unsigned int cpu)
 {
-	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) {
-		armada_xp_mpic_perf_init();
-		enable_percpu_irq(parent_irq, IRQ_TYPE_NONE);
-	}
-
-	return NOTIFY_OK;
+	armada_xp_mpic_perf_init();
+	enable_percpu_irq(parent_irq, IRQ_TYPE_NONE);
+	return 0;
 }
-
-static struct notifier_block mpic_cascaded_cpu_notifier = {
-	.notifier_call = mpic_cascaded_secondary_init,
-	.priority = 100,
-};
-#endif /* CONFIG_SMP */
+#endif
 
 static const struct irq_domain_ops armada_370_xp_mpic_irq_ops = {
 	.map = armada_370_xp_mpic_irq_map,
@@ -595,11 +577,15 @@
 		set_handle_irq(armada_370_xp_handle_irq);
 #ifdef CONFIG_SMP
 		set_smp_cross_call(armada_mpic_send_doorbell);
-		register_cpu_notifier(&armada_370_xp_mpic_cpu_notifier);
+		cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_ARMADA_XP_STARTING,
+					  "AP_IRQ_ARMADA_XP_STARTING",
+					  armada_xp_mpic_starting_cpu, NULL);
 #endif
 	} else {
 #ifdef CONFIG_SMP
-		register_cpu_notifier(&mpic_cascaded_cpu_notifier);
+		cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_ARMADA_CASC_STARTING,
+					  "AP_IRQ_ARMADA_CASC_STARTING",
+					  mpic_cascaded_starting_cpu, NULL);
 #endif
 		irq_set_chained_handler(parent_irq,
 					armada_370_xp_mpic_handle_cascade_irq);
diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c
index df1949c..d96b2c9 100644
--- a/drivers/irqchip/irq-bcm2836.c
+++ b/drivers/irqchip/irq-bcm2836.c
@@ -202,26 +202,19 @@
 	}
 }
 
-/* Unmasks the IPI on the CPU when it's online. */
-static int bcm2836_arm_irqchip_cpu_notify(struct notifier_block *nfb,
-					  unsigned long action, void *hcpu)
+static int bcm2836_cpu_starting(unsigned int cpu)
 {
-	unsigned int cpu = (unsigned long)hcpu;
-	unsigned int int_reg = LOCAL_MAILBOX_INT_CONTROL0;
-	unsigned int mailbox = 0;
-
-	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
-		bcm2836_arm_irqchip_unmask_per_cpu_irq(int_reg, mailbox, cpu);
-	else if (action == CPU_DYING)
-		bcm2836_arm_irqchip_mask_per_cpu_irq(int_reg, mailbox, cpu);
-
-	return NOTIFY_OK;
+	bcm2836_arm_irqchip_unmask_per_cpu_irq(LOCAL_MAILBOX_INT_CONTROL0, 0,
+					       cpu);
+	return 0;
 }
 
-static struct notifier_block bcm2836_arm_irqchip_cpu_notifier = {
-	.notifier_call = bcm2836_arm_irqchip_cpu_notify,
-	.priority = 100,
-};
+static int bcm2836_cpu_dying(unsigned int cpu)
+{
+	bcm2836_arm_irqchip_mask_per_cpu_irq(LOCAL_MAILBOX_INT_CONTROL0, 0,
+					     cpu);
+	return 0;
+}
 
 #ifdef CONFIG_ARM
 static int __init bcm2836_smp_boot_secondary(unsigned int cpu,
@@ -251,10 +244,9 @@
 {
 #ifdef CONFIG_SMP
 	/* Unmask IPIs to the boot CPU. */
-	bcm2836_arm_irqchip_cpu_notify(&bcm2836_arm_irqchip_cpu_notifier,
-				       CPU_STARTING,
-				       (void *)(uintptr_t)smp_processor_id());
-	register_cpu_notifier(&bcm2836_arm_irqchip_cpu_notifier);
+	cpuhp_setup_state(CPUHP_AP_IRQ_BCM2836_STARTING,
+			  "AP_IRQ_BCM2836_STARTING", bcm2836_cpu_starting,
+			  bcm2836_cpu_dying);
 
 	set_smp_cross_call(bcm2836_arm_irqchip_send_ipi);
 
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 2c5ba0e..6fc56c3 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -538,22 +538,12 @@
 }
 
 #ifdef CONFIG_SMP
-static int gic_secondary_init(struct notifier_block *nfb,
-			      unsigned long action, void *hcpu)
-{
-	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
-		gic_cpu_init();
-	return NOTIFY_OK;
-}
 
-/*
- * Notifier for enabling the GIC CPU interface. Set an arbitrarily high
- * priority because the GIC needs to be up before the ARM generic timers.
- */
-static struct notifier_block gic_cpu_notifier = {
-	.notifier_call = gic_secondary_init,
-	.priority = 100,
-};
+static int gic_starting_cpu(unsigned int cpu)
+{
+	gic_cpu_init();
+	return 0;
+}
 
 static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
 				   unsigned long cluster_id)
@@ -634,7 +624,9 @@
 static void gic_smp_init(void)
 {
 	set_smp_cross_call(gic_raise_softirq);
-	register_cpu_notifier(&gic_cpu_notifier);
+	cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GICV3_STARTING,
+				  "AP_IRQ_GICV3_STARTING", gic_starting_cpu,
+				  NULL);
 }
 
 static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 1de07eb..c2cab57 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -984,25 +984,12 @@
 	return -EINVAL;
 }
 
-#ifdef CONFIG_SMP
-static int gic_secondary_init(struct notifier_block *nfb, unsigned long action,
-			      void *hcpu)
+static int gic_starting_cpu(unsigned int cpu)
 {
-	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
-		gic_cpu_init(&gic_data[0]);
-	return NOTIFY_OK;
+	gic_cpu_init(&gic_data[0]);
+	return 0;
 }
 
-/*
- * Notifier for enabling the GIC CPU interface. Set an arbitrarily high
- * priority because the GIC needs to be up before the ARM generic timers.
- */
-static struct notifier_block gic_cpu_notifier = {
-	.notifier_call = gic_secondary_init,
-	.priority = 100,
-};
-#endif
-
 static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
 				unsigned int nr_irqs, void *arg)
 {
@@ -1177,8 +1164,10 @@
 			gic_cpu_map[i] = 0xff;
 #ifdef CONFIG_SMP
 		set_smp_cross_call(gic_raise_softirq);
-		register_cpu_notifier(&gic_cpu_notifier);
 #endif
+		cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
+					  "AP_IRQ_GIC_STARTING",
+					  gic_starting_cpu, NULL);
 		set_handle_irq(gic_handle_irq);
 		if (static_key_true(&supports_deactivate))
 			pr_info("GIC: Using split EOI/Deactivate mode\n");
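
The irqchip conversions use the _nocalls variant: the boot CPU's interface has already been initialised directly on the probe path (gic_cpu_init() above), so the callback only needs to be installed for CPUs that come up later, not replayed on the CPUs that are already online. A minimal sketch with a hypothetical callback name:

/* Sketch only; demo_gic_starting_cpu() is a placeholder. */
static int demo_gic_starting_cpu(unsigned int cpu)
{
	/* per-CPU interface setup for a secondary CPU */
	return 0;
}

static void demo_smp_init(void)
{
	cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
				  "AP_IRQ_GIC_STARTING",
				  demo_gic_starting_cpu, NULL);
}
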
diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c
index 9e25d8c..021b0e0 100644
--- a/drivers/irqchip/irq-hip04.c
+++ b/drivers/irqchip/irq-hip04.c
@@ -342,26 +342,12 @@
 	return ret;
 }
 
-#ifdef CONFIG_SMP
-static int hip04_irq_secondary_init(struct notifier_block *nfb,
-				    unsigned long action,
-				    void *hcpu)
+static int hip04_irq_starting_cpu(unsigned int cpu)
 {
-	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
-		hip04_irq_cpu_init(&hip04_data);
-	return NOTIFY_OK;
+	hip04_irq_cpu_init(&hip04_data);
+	return 0;
 }
 
-/*
- * Notifier for enabling the INTC CPU interface. Set an arbitrarily high
- * priority because the GIC needs to be up before the ARM generic timers.
- */
-static struct notifier_block hip04_irq_cpu_notifier = {
-	.notifier_call	= hip04_irq_secondary_init,
-	.priority	= 100,
-};
-#endif
-
 static const struct irq_domain_ops hip04_irq_domain_ops = {
 	.map	= hip04_irq_domain_map,
 	.xlate	= hip04_irq_domain_xlate,
@@ -417,13 +403,12 @@
 
 #ifdef CONFIG_SMP
 	set_smp_cross_call(hip04_raise_softirq);
-	register_cpu_notifier(&hip04_irq_cpu_notifier);
 #endif
 	set_handle_irq(hip04_handle_irq);
 
 	hip04_irq_dist_init(&hip04_data);
-	hip04_irq_cpu_init(&hip04_data);
-
+	cpuhp_setup_state(CPUHP_AP_IRQ_HIP04_STARTING, "AP_IRQ_HIP04_STARTING",
+			  hip04_irq_starting_cpu, NULL);
 	return 0;
 }
 IRQCHIP_DECLARE(hip04_intc, "hisilicon,hip04-intc", hip04_of_init);
diff --git a/drivers/leds/trigger/ledtrig-cpu.c b/drivers/leds/trigger/ledtrig-cpu.c
index 938467f..22f0634 100644
--- a/drivers/leds/trigger/ledtrig-cpu.c
+++ b/drivers/leds/trigger/ledtrig-cpu.c
@@ -92,29 +92,22 @@
 	.resume		= ledtrig_cpu_syscore_resume,
 };
 
-static int ledtrig_cpu_notify(struct notifier_block *self,
-					   unsigned long action, void *hcpu)
+static int ledtrig_online_cpu(unsigned int cpu)
 {
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_STARTING:
-		ledtrig_cpu(CPU_LED_START);
-		break;
-	case CPU_DYING:
-		ledtrig_cpu(CPU_LED_STOP);
-		break;
-	}
-
-	return NOTIFY_OK;
+	ledtrig_cpu(CPU_LED_START);
+	return 0;
 }
 
-
-static struct notifier_block ledtrig_cpu_nb = {
-	.notifier_call = ledtrig_cpu_notify,
-};
+static int ledtrig_prepare_down_cpu(unsigned int cpu)
+{
+	ledtrig_cpu(CPU_LED_STOP);
+	return 0;
+}
 
 static int __init ledtrig_cpu_init(void)
 {
 	int cpu;
+	int ret;
 
 	/* Supports up to 9999 cpu cores */
 	BUILD_BUG_ON(CONFIG_NR_CPUS > 9999);
@@ -133,7 +126,12 @@
 	}
 
 	register_syscore_ops(&ledtrig_cpu_syscore_ops);
-	register_cpu_notifier(&ledtrig_cpu_nb);
+
+	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "AP_LEDTRIG_STARTING",
+				ledtrig_online_cpu, ledtrig_prepare_down_cpu);
+	if (ret < 0)
+		pr_err("CPU hotplug notifier for ledtrig-cpu could not be registered: %d\n",
+		       ret);
 
 	pr_info("ledtrig-cpu: registered to indicate activity on CPUs\n");
 
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 8e4d7f5..6ccb994 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -688,30 +688,29 @@
 	return 0;
 }
 
+static DEFINE_MUTEX(arm_pmu_mutex);
+static LIST_HEAD(arm_pmu_list);
+
 /*
  * PMU hardware loses all context when a CPU goes offline.
  * When a CPU is hotplugged back in, since some hardware registers are
  * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
  * junk values out of them.
  */
-static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
-			  void *hcpu)
+static int arm_perf_starting_cpu(unsigned int cpu)
 {
-	int cpu = (unsigned long)hcpu;
-	struct arm_pmu *pmu = container_of(b, struct arm_pmu, hotplug_nb);
+	struct arm_pmu *pmu;
 
-	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
-		return NOTIFY_DONE;
+	mutex_lock(&arm_pmu_mutex);
+	list_for_each_entry(pmu, &arm_pmu_list, entry) {
 
-	if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
-		return NOTIFY_DONE;
-
-	if (pmu->reset)
-		pmu->reset(pmu);
-	else
-		return NOTIFY_DONE;
-
-	return NOTIFY_OK;
+		if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
+			continue;
+		if (pmu->reset)
+			pmu->reset(pmu);
+	}
+	mutex_unlock(&arm_pmu_mutex);
+	return 0;
 }
 
 #ifdef CONFIG_CPU_PM
@@ -822,10 +821,9 @@
 	if (!cpu_hw_events)
 		return -ENOMEM;
 
-	cpu_pmu->hotplug_nb.notifier_call = cpu_pmu_notify;
-	err = register_cpu_notifier(&cpu_pmu->hotplug_nb);
-	if (err)
-		goto out_hw_events;
+	mutex_lock(&arm_pmu_mutex);
+	list_add_tail(&cpu_pmu->entry, &arm_pmu_list);
+	mutex_unlock(&arm_pmu_mutex);
 
 	err = cpu_pm_pmu_register(cpu_pmu);
 	if (err)
@@ -861,8 +859,9 @@
 	return 0;
 
 out_unregister:
-	unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
-out_hw_events:
+	mutex_lock(&arm_pmu_mutex);
+	list_del(&cpu_pmu->entry);
+	mutex_unlock(&arm_pmu_mutex);
 	free_percpu(cpu_hw_events);
 	return err;
 }
@@ -870,7 +869,9 @@
 static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
 {
 	cpu_pm_pmu_unregister(cpu_pmu);
-	unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
+	mutex_lock(&arm_pmu_mutex);
+	list_del(&cpu_pmu->entry);
+	mutex_unlock(&arm_pmu_mutex);
 	free_percpu(cpu_pmu->hw_events);
 }
 
@@ -1061,3 +1062,17 @@
 	kfree(pmu);
 	return ret;
 }
+
+static int arm_pmu_hp_init(void)
+{
+	int ret;
+
+	ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_STARTING,
+					"AP_PERF_ARM_STARTING",
+					arm_perf_starting_cpu, NULL);
+	if (ret)
+		pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n",
+		       ret);
+	return ret;
+}
+subsys_initcall(arm_pmu_hp_init);
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 21597dc..797d9c8 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -55,17 +55,6 @@
 #endif
 struct notifier_block;
 
-/*
- * CPU notifier priorities.
- */
-enum {
-	CPU_PRI_PERF		= 20,
-
-	/* bring up workqueues before normal notifiers and down after */
-	CPU_PRI_WORKQUEUE_UP	= 5,
-	CPU_PRI_WORKQUEUE_DOWN	= -5,
-};
-
 #define CPU_ONLINE		0x0002 /* CPU (unsigned)v is up */
 #define CPU_UP_PREPARE		0x0003 /* CPU (unsigned)v coming up */
 #define CPU_UP_CANCELED		0x0004 /* CPU (unsigned)v NOT coming up */
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 386374d..242bf53 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -4,19 +4,95 @@
 enum cpuhp_state {
 	CPUHP_OFFLINE,
 	CPUHP_CREATE_THREADS,
+	CPUHP_PERF_PREPARE,
+	CPUHP_PERF_X86_PREPARE,
+	CPUHP_PERF_X86_UNCORE_PREP,
+	CPUHP_PERF_X86_AMD_UNCORE_PREP,
+	CPUHP_PERF_X86_RAPL_PREP,
+	CPUHP_PERF_BFIN,
+	CPUHP_PERF_POWER,
+	CPUHP_PERF_SUPERH,
+	CPUHP_X86_HPET_DEAD,
+	CPUHP_X86_APB_DEAD,
+	CPUHP_WORKQUEUE_PREP,
+	CPUHP_POWER_NUMA_PREPARE,
+	CPUHP_HRTIMERS_PREPARE,
+	CPUHP_PROFILE_PREPARE,
+	CPUHP_X2APIC_PREPARE,
+	CPUHP_SMPCFD_PREPARE,
+	CPUHP_RCUTREE_PREP,
 	CPUHP_NOTIFY_PREPARE,
+	CPUHP_TIMERS_DEAD,
 	CPUHP_BRINGUP_CPU,
 	CPUHP_AP_IDLE_DEAD,
 	CPUHP_AP_OFFLINE,
 	CPUHP_AP_SCHED_STARTING,
+	CPUHP_AP_RCUTREE_DYING,
+	CPUHP_AP_IRQ_GIC_STARTING,
+	CPUHP_AP_IRQ_GICV3_STARTING,
+	CPUHP_AP_IRQ_HIP04_STARTING,
+	CPUHP_AP_IRQ_ARMADA_XP_STARTING,
+	CPUHP_AP_IRQ_ARMADA_CASC_STARTING,
+	CPUHP_AP_IRQ_BCM2836_STARTING,
+	CPUHP_AP_ARM_MVEBU_COHERENCY,
+	CPUHP_AP_PERF_X86_UNCORE_STARTING,
+	CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
+	CPUHP_AP_PERF_X86_STARTING,
+	CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
+	CPUHP_AP_PERF_X86_CQM_STARTING,
+	CPUHP_AP_PERF_X86_CSTATE_STARTING,
+	CPUHP_AP_PERF_XTENSA_STARTING,
+	CPUHP_AP_PERF_METAG_STARTING,
+	CPUHP_AP_MIPS_OP_LOONGSON3_STARTING,
+	CPUHP_AP_ARM_VFP_STARTING,
+	CPUHP_AP_PERF_ARM_STARTING,
+	CPUHP_AP_ARM_L2X0_STARTING,
+	CPUHP_AP_ARM_ARCH_TIMER_STARTING,
+	CPUHP_AP_ARM_GLOBAL_TIMER_STARTING,
+	CPUHP_AP_DUMMY_TIMER_STARTING,
+	CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
+	CPUHP_AP_ARM_TWD_STARTING,
+	CPUHP_AP_METAG_TIMER_STARTING,
+	CPUHP_AP_QCOM_TIMER_STARTING,
+	CPUHP_AP_ARMADA_TIMER_STARTING,
+	CPUHP_AP_MARCO_TIMER_STARTING,
+	CPUHP_AP_MIPS_GIC_TIMER_STARTING,
+	CPUHP_AP_ARC_TIMER_STARTING,
+	CPUHP_AP_KVM_STARTING,
+	CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING,
+	CPUHP_AP_KVM_ARM_VGIC_STARTING,
+	CPUHP_AP_KVM_ARM_TIMER_STARTING,
+	CPUHP_AP_ARM_XEN_STARTING,
+	CPUHP_AP_ARM_CORESIGHT_STARTING,
+	CPUHP_AP_ARM_CORESIGHT4_STARTING,
+	CPUHP_AP_ARM64_ISNDEP_STARTING,
+	CPUHP_AP_SMPCFD_DYING,
+	CPUHP_AP_X86_TBOOT_DYING,
 	CPUHP_AP_NOTIFY_STARTING,
 	CPUHP_AP_ONLINE,
 	CPUHP_TEARDOWN_CPU,
 	CPUHP_AP_ONLINE_IDLE,
 	CPUHP_AP_SMPBOOT_THREADS,
+	CPUHP_AP_X86_VDSO_VMA_ONLINE,
+	CPUHP_AP_PERF_ONLINE,
+	CPUHP_AP_PERF_X86_ONLINE,
+	CPUHP_AP_PERF_X86_UNCORE_ONLINE,
+	CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
+	CPUHP_AP_PERF_X86_AMD_POWER_ONLINE,
+	CPUHP_AP_PERF_X86_RAPL_ONLINE,
+	CPUHP_AP_PERF_X86_CQM_ONLINE,
+	CPUHP_AP_PERF_X86_CSTATE_ONLINE,
+	CPUHP_AP_PERF_S390_CF_ONLINE,
+	CPUHP_AP_PERF_S390_SF_ONLINE,
+	CPUHP_AP_PERF_ARM_CCI_ONLINE,
+	CPUHP_AP_PERF_ARM_CCN_ONLINE,
+	CPUHP_AP_WORKQUEUE_ONLINE,
+	CPUHP_AP_RCUTREE_ONLINE,
 	CPUHP_AP_NOTIFY_ONLINE,
 	CPUHP_AP_ONLINE_DYN,
 	CPUHP_AP_ONLINE_DYN_END		= CPUHP_AP_ONLINE_DYN + 30,
+	CPUHP_AP_X86_HPET_ONLINE,
+	CPUHP_AP_X86_KVM_CLK_ONLINE,
 	CPUHP_AP_ACTIVE,
 	CPUHP_ONLINE,
 };
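
The position of an entry in this enum is what replaces the old notifier priorities: bring-up walks the states from CPUHP_OFFLINE towards CPUHP_ONLINE and teardown walks them in reverse, and the states between CPUHP_AP_OFFLINE and CPUHP_AP_ONLINE run on the hotplugged CPU itself with interrupts disabled. As a rough illustrative sketch (the FOO names are invented, such a slot would have to be added to the enum above, and none of this is part of the diff), claiming a statically allocated STARTING-level state looks like this:

#include <linux/cpu.h>
#include <linux/cpuhotplug.h>
#include <linux/init.h>

static int foo_starting_cpu(unsigned int cpu)
{
	/* Runs on the incoming CPU with interrupts disabled. */
	return 0;
}

static int foo_dying_cpu(unsigned int cpu)
{
	/* Runs on the outgoing CPU; undo foo_starting_cpu(). */
	return 0;
}

static int __init foo_hp_init(void)
{
	/*
	 * cpuhp_setup_state() also invokes the startup callback for CPUs
	 * that are already online; cpuhp_setup_state_nocalls(), as used by
	 * the ARM PMU conversion above, only installs the callbacks.
	 */
	return cpuhp_setup_state(CPUHP_AP_FOO_STARTING, "AP_FOO_STARTING",
				 foo_starting_cpu, foo_dying_cpu);
}
subsys_initcall(foo_hp_init);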
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index c98c653..5e00f80 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -494,4 +494,11 @@
 /* Show pending timers: */
 extern void sysrq_timer_list_show(void);
 
+int hrtimers_prepare_cpu(unsigned int cpu);
+#ifdef CONFIG_HOTPLUG_CPU
+int hrtimers_dead_cpu(unsigned int cpu);
+#else
+#define hrtimers_dead_cpu	NULL
+#endif
+
 #endif
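
The #else fallback is not just tidiness: the cpuhp_step table added to kernel/cpu.c later in this diff initializes its .teardown members with these symbols unconditionally, and the hotplug core skips a NULL callback. The same idiom, sketched for a hypothetical foo subsystem (not part of the diff):

int foo_prepare_cpu(unsigned int cpu);
#ifdef CONFIG_HOTPLUG_CPU
int foo_dead_cpu(unsigned int cpu);
#else
/* Degrades to NULL so it can still be used as a .teardown initializer. */
#define foo_dead_cpu	NULL
#endif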
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index d28ac05..e188438 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -109,7 +109,7 @@
 	DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
 	struct platform_device	*plat_device;
 	struct pmu_hw_events	__percpu *hw_events;
-	struct notifier_block	hotplug_nb;
+	struct list_head	entry;
 	struct notifier_block	cpu_pm_nb;
 };
 
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index e1f921c..8ed43261 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1309,41 +1309,6 @@
 
 #define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x))
 
-/*
- * This has to have a higher priority than migration_notifier in sched/core.c.
- */
-#define perf_cpu_notifier(fn)						\
-do {									\
-	static struct notifier_block fn##_nb =				\
-		{ .notifier_call = fn, .priority = CPU_PRI_PERF };	\
-	unsigned long cpu = smp_processor_id();				\
-	unsigned long flags;						\
-									\
-	cpu_notifier_register_begin();					\
-	fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,			\
-		(void *)(unsigned long)cpu);				\
-	local_irq_save(flags);						\
-	fn(&fn##_nb, (unsigned long)CPU_STARTING,			\
-		(void *)(unsigned long)cpu);				\
-	local_irq_restore(flags);					\
-	fn(&fn##_nb, (unsigned long)CPU_ONLINE,				\
-		(void *)(unsigned long)cpu);				\
-	__register_cpu_notifier(&fn##_nb);				\
-	cpu_notifier_register_done();					\
-} while (0)
-
-/*
- * Bare-bones version of perf_cpu_notifier(), which doesn't invoke the
- * callback for already online CPUs.
- */
-#define __perf_cpu_notifier(fn)						\
-do {									\
-	static struct notifier_block fn##_nb =				\
-		{ .notifier_call = fn, .priority = CPU_PRI_PERF };	\
-									\
-	__register_cpu_notifier(&fn##_nb);				\
-} while (0)
-
 struct perf_pmu_events_attr {
 	struct device_attribute attr;
 	u64 id;
@@ -1385,4 +1350,13 @@
 									\
 static struct device_attribute format_attr_##_name = __ATTR_RO(_name)
 
+/* Performance counter hotplug functions */
+#ifdef CONFIG_PERF_EVENTS
+int perf_event_init_cpu(unsigned int cpu);
+int perf_event_exit_cpu(unsigned int cpu);
+#else
+#define perf_event_init_cpu	NULL
+#define perf_event_exit_cpu	NULL
+#endif
+
 #endif /* _LINUX_PERF_EVENT_H */
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 93aea75..ac81e40 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -243,4 +243,11 @@
 	barrier(); /* Avoid RCU read-side critical sections leaking across. */
 }
 
+/* RCUtree hotplug events */
+#define rcutree_prepare_cpu      NULL
+#define rcutree_online_cpu       NULL
+#define rcutree_offline_cpu      NULL
+#define rcutree_dead_cpu         NULL
+#define rcutree_dying_cpu        NULL
+
 #endif /* __LINUX_RCUTINY_H */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 5043cb8..63a4e4c 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -111,4 +111,11 @@
 
 void rcu_all_qs(void);
 
+/* RCUtree hotplug events */
+int rcutree_prepare_cpu(unsigned int cpu);
+int rcutree_online_cpu(unsigned int cpu);
+int rcutree_offline_cpu(unsigned int cpu);
+int rcutree_dead_cpu(unsigned int cpu);
+int rcutree_dying_cpu(unsigned int cpu);
+
 #endif /* __LINUX_RCUTREE_H */
diff --git a/include/linux/smp.h b/include/linux/smp.h
index c441407..eccae469 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -196,4 +196,9 @@
 
 void smp_setup_processor_id(void);
 
+/* SMP core functions */
+int smpcfd_prepare_cpu(unsigned int cpu);
+int smpcfd_dead_cpu(unsigned int cpu);
+int smpcfd_dying_cpu(unsigned int cpu);
+
 #endif /* __LINUX_SMP_H */
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 4419506..51d601f 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -273,4 +273,10 @@
 unsigned long round_jiffies_up(unsigned long j);
 unsigned long round_jiffies_up_relative(unsigned long j);
 
+#ifdef CONFIG_HOTPLUG_CPU
+int timers_dead_cpu(unsigned int cpu);
+#else
+#define timers_dead_cpu NULL
+#endif
+
 #endif
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index ca73c50..26cc1df 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -625,4 +625,10 @@
 static inline void wq_watchdog_touch(int cpu) { }
 #endif	/* CONFIG_WQ_WATCHDOG */
 
+#ifdef CONFIG_SMP
+int workqueue_prepare_cpu(unsigned int cpu);
+int workqueue_online_cpu(unsigned int cpu);
+int workqueue_offline_cpu(unsigned int cpu);
+#endif
+
 #endif
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 7b61887..341bf80 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -517,6 +517,13 @@
 	if (!cpu_online(cpu))
 		return 0;
 
+	/*
+	 * If we are up and running, use the hotplug thread. For early calls
+	 * we invoke the thread function directly.
+	 */
+	if (!st->thread)
+		return cpuhp_invoke_callback(cpu, state, cb);
+
 	st->cb_state = state;
 	st->cb = cb;
 	/*
@@ -1173,6 +1180,31 @@
 		.teardown		= NULL,
 		.cant_stop		= true,
 	},
+	[CPUHP_PERF_PREPARE] = {
+		.name = "perf prepare",
+		.startup = perf_event_init_cpu,
+		.teardown = perf_event_exit_cpu,
+	},
+	[CPUHP_WORKQUEUE_PREP] = {
+		.name = "workqueue prepare",
+		.startup = workqueue_prepare_cpu,
+		.teardown = NULL,
+	},
+	[CPUHP_HRTIMERS_PREPARE] = {
+		.name = "hrtimers prepare",
+		.startup = hrtimers_prepare_cpu,
+		.teardown = hrtimers_dead_cpu,
+	},
+	[CPUHP_SMPCFD_PREPARE] = {
+		.name = "SMPCFD prepare",
+		.startup = smpcfd_prepare_cpu,
+		.teardown = smpcfd_dead_cpu,
+	},
+	[CPUHP_RCUTREE_PREP] = {
+		.name = "RCU-tree prepare",
+		.startup = rcutree_prepare_cpu,
+		.teardown = rcutree_dead_cpu,
+	},
 	/*
 	 * Preparatory and dead notifiers. Will be replaced once the notifiers
 	 * are converted to states.
@@ -1184,6 +1216,16 @@
 		.skip_onerr		= true,
 		.cant_stop		= true,
 	},
+	/*
+	 * On the tear-down path, timers_dead_cpu() must be invoked
+	 * before blk_mq_queue_reinit_notify() from notify_dead(),
+	 * otherwise an RCU stall occurs.
+	 */
+	[CPUHP_TIMERS_DEAD] = {
+		.name = "timers dead",
+		.startup = NULL,
+		.teardown = timers_dead_cpu,
+	},
 	/* Kicks the plugged cpu into life */
 	[CPUHP_BRINGUP_CPU] = {
 		.name			= "cpu:bringup",
@@ -1191,6 +1233,10 @@
 		.teardown		= NULL,
 		.cant_stop		= true,
 	},
+	[CPUHP_AP_SMPCFD_DYING] = {
+		.startup = NULL,
+		.teardown = smpcfd_dying_cpu,
+	},
 	/*
 	 * Handled on control processor until the plugged processor manages
 	 * this itself.
@@ -1227,6 +1273,10 @@
 		.startup		= sched_cpu_starting,
 		.teardown		= sched_cpu_dying,
 	},
+	[CPUHP_AP_RCUTREE_DYING] = {
+		.startup = NULL,
+		.teardown = rcutree_dying_cpu,
+	},
 	/*
 	 * Low level startup/teardown notifiers. Run with interrupts
 	 * disabled. Will be removed once the notifiers are converted to
@@ -1250,6 +1300,22 @@
 		.startup		= smpboot_unpark_threads,
 		.teardown		= NULL,
 	},
+	[CPUHP_AP_PERF_ONLINE] = {
+		.name = "perf online",
+		.startup = perf_event_init_cpu,
+		.teardown = perf_event_exit_cpu,
+	},
+	[CPUHP_AP_WORKQUEUE_ONLINE] = {
+		.name = "workqueue online",
+		.startup = workqueue_online_cpu,
+		.teardown = workqueue_offline_cpu,
+	},
+	[CPUHP_AP_RCUTREE_ONLINE] = {
+		.name = "RCU-tree online",
+		.startup = rcutree_online_cpu,
+		.teardown = rcutree_offline_cpu,
+	},
+
 	/*
 	 * Online/down_prepare notifiers. Will be removed once the notifiers
 	 * are converted to states.
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 09ae27b..356a6c7 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -10357,7 +10357,7 @@
 	}
 }
 
-static void perf_event_init_cpu(int cpu)
+int perf_event_init_cpu(unsigned int cpu)
 {
 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
 
@@ -10370,6 +10370,7 @@
 		rcu_assign_pointer(swhash->swevent_hlist, hlist);
 	}
 	mutex_unlock(&swhash->hlist_mutex);
+	return 0;
 }
 
 #if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
@@ -10401,14 +10402,17 @@
 	}
 	srcu_read_unlock(&pmus_srcu, idx);
 }
+#else
 
-static void perf_event_exit_cpu(int cpu)
+static void perf_event_exit_cpu_context(int cpu) { }
+
+#endif
+
+int perf_event_exit_cpu(unsigned int cpu)
 {
 	perf_event_exit_cpu_context(cpu);
+	return 0;
 }
-#else
-static inline void perf_event_exit_cpu(int cpu) { }
-#endif
 
 static int
 perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
@@ -10430,46 +10434,6 @@
 	.priority = INT_MIN,
 };
 
-static int
-perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
-{
-	unsigned int cpu = (long)hcpu;
-
-	switch (action & ~CPU_TASKS_FROZEN) {
-
-	case CPU_UP_PREPARE:
-		/*
-		 * This must be done before the CPU comes alive, because the
-		 * moment we can run tasks we can encounter (software) events.
-		 *
-		 * Specifically, someone can have inherited events on kthreadd
-		 * or a pre-existing worker thread that gets re-bound.
-		 */
-		perf_event_init_cpu(cpu);
-		break;
-
-	case CPU_DOWN_PREPARE:
-		/*
-		 * This must be done before the CPU dies because after that an
-		 * active event might want to IPI the CPU and that'll not work
-		 * so great for dead CPUs.
-		 *
-		 * XXX smp_call_function_single() return -ENXIO without a warn
-		 * so we could possibly deal with this.
-		 *
-		 * This is safe against new events arriving because
-		 * sys_perf_event_open() serializes against hotplug using
-		 * get_online_cpus().
-		 */
-		perf_event_exit_cpu(cpu);
-		break;
-	default:
-		break;
-	}
-
-	return NOTIFY_OK;
-}
-
 void __init perf_event_init(void)
 {
 	int ret;
@@ -10482,7 +10446,7 @@
 	perf_pmu_register(&perf_cpu_clock, NULL, -1);
 	perf_pmu_register(&perf_task_clock, NULL, -1);
 	perf_tp_register();
-	perf_cpu_notifier(perf_cpu_notify);
+	perf_event_init_cpu(smp_processor_id());
 	register_reboot_notifier(&perf_reboot_notifier);
 
 	ret = init_hw_breakpoint();
diff --git a/kernel/profile.c b/kernel/profile.c
index c2199e9..2dbccf2 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -328,68 +328,57 @@
 	put_cpu();
 }
 
-static int profile_cpu_callback(struct notifier_block *info,
-					unsigned long action, void *__cpu)
+static int profile_dead_cpu(unsigned int cpu)
 {
-	int node, cpu = (unsigned long)__cpu;
+	struct page *page;
+	int i;
+
+	if (prof_cpu_mask != NULL)
+		cpumask_clear_cpu(cpu, prof_cpu_mask);
+
+	for (i = 0; i < 2; i++) {
+		if (per_cpu(cpu_profile_hits, cpu)[i]) {
+			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[i]);
+			per_cpu(cpu_profile_hits, cpu)[i] = NULL;
+			__free_page(page);
+		}
+	}
+	return 0;
+}
+
+static int profile_prepare_cpu(unsigned int cpu)
+{
+	int i, node = cpu_to_mem(cpu);
 	struct page *page;
 
-	switch (action) {
-	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
-		node = cpu_to_mem(cpu);
-		per_cpu(cpu_profile_flip, cpu) = 0;
-		if (!per_cpu(cpu_profile_hits, cpu)[1]) {
-			page = __alloc_pages_node(node,
-					GFP_KERNEL | __GFP_ZERO,
-					0);
-			if (!page)
-				return notifier_from_errno(-ENOMEM);
-			per_cpu(cpu_profile_hits, cpu)[1] = page_address(page);
+	per_cpu(cpu_profile_flip, cpu) = 0;
+
+	for (i = 0; i < 2; i++) {
+		if (per_cpu(cpu_profile_hits, cpu)[i])
+			continue;
+
+		page = __alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
+		if (!page) {
+			profile_dead_cpu(cpu);
+			return -ENOMEM;
 		}
-		if (!per_cpu(cpu_profile_hits, cpu)[0]) {
-			page = __alloc_pages_node(node,
-					GFP_KERNEL | __GFP_ZERO,
-					0);
-			if (!page)
-				goto out_free;
-			per_cpu(cpu_profile_hits, cpu)[0] = page_address(page);
-		}
-		break;
-out_free:
-		page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
-		per_cpu(cpu_profile_hits, cpu)[1] = NULL;
-		__free_page(page);
-		return notifier_from_errno(-ENOMEM);
-	case CPU_ONLINE:
-	case CPU_ONLINE_FROZEN:
-		if (prof_cpu_mask != NULL)
-			cpumask_set_cpu(cpu, prof_cpu_mask);
-		break;
-	case CPU_UP_CANCELED:
-	case CPU_UP_CANCELED_FROZEN:
-	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
-		if (prof_cpu_mask != NULL)
-			cpumask_clear_cpu(cpu, prof_cpu_mask);
-		if (per_cpu(cpu_profile_hits, cpu)[0]) {
-			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
-			per_cpu(cpu_profile_hits, cpu)[0] = NULL;
-			__free_page(page);
-		}
-		if (per_cpu(cpu_profile_hits, cpu)[1]) {
-			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
-			per_cpu(cpu_profile_hits, cpu)[1] = NULL;
-			__free_page(page);
-		}
-		break;
+		per_cpu(cpu_profile_hits, cpu)[i] = page_address(page);
+
 	}
-	return NOTIFY_OK;
+	return 0;
 }
+
+static int profile_online_cpu(unsigned int cpu)
+{
+	if (prof_cpu_mask != NULL)
+		cpumask_set_cpu(cpu, prof_cpu_mask);
+
+	return 0;
+}
+
 #else /* !CONFIG_SMP */
 #define profile_flip_buffers()		do { } while (0)
 #define profile_discard_flip_buffers()	do { } while (0)
-#define profile_cpu_callback		NULL
 
 static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
 {
@@ -531,83 +520,43 @@
 	.llseek		= default_llseek,
 };
 
-#ifdef CONFIG_SMP
-static void profile_nop(void *unused)
-{
-}
-
-static int create_hash_tables(void)
-{
-	int cpu;
-
-	for_each_online_cpu(cpu) {
-		int node = cpu_to_mem(cpu);
-		struct page *page;
-
-		page = __alloc_pages_node(node,
-				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
-				0);
-		if (!page)
-			goto out_cleanup;
-		per_cpu(cpu_profile_hits, cpu)[1]
-				= (struct profile_hit *)page_address(page);
-		page = __alloc_pages_node(node,
-				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
-				0);
-		if (!page)
-			goto out_cleanup;
-		per_cpu(cpu_profile_hits, cpu)[0]
-				= (struct profile_hit *)page_address(page);
-	}
-	return 0;
-out_cleanup:
-	prof_on = 0;
-	smp_mb();
-	on_each_cpu(profile_nop, NULL, 1);
-	for_each_online_cpu(cpu) {
-		struct page *page;
-
-		if (per_cpu(cpu_profile_hits, cpu)[0]) {
-			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
-			per_cpu(cpu_profile_hits, cpu)[0] = NULL;
-			__free_page(page);
-		}
-		if (per_cpu(cpu_profile_hits, cpu)[1]) {
-			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
-			per_cpu(cpu_profile_hits, cpu)[1] = NULL;
-			__free_page(page);
-		}
-	}
-	return -1;
-}
-#else
-#define create_hash_tables()			({ 0; })
-#endif
-
-int __ref create_proc_profile(void) /* false positive from hotcpu_notifier */
+int __ref create_proc_profile(void)
 {
 	struct proc_dir_entry *entry;
+#ifdef CONFIG_SMP
+	enum cpuhp_state online_state;
+#endif
+
 	int err = 0;
 
 	if (!prof_on)
 		return 0;
+#ifdef CONFIG_SMP
+	err = cpuhp_setup_state(CPUHP_PROFILE_PREPARE, "PROFILE_PREPARE",
+				profile_prepare_cpu, profile_dead_cpu);
+	if (err)
+		return err;
 
-	cpu_notifier_register_begin();
-
-	if (create_hash_tables()) {
-		err = -ENOMEM;
-		goto out;
-	}
-
+	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "AP_PROFILE_ONLINE",
+				profile_online_cpu, NULL);
+	if (err < 0)
+		goto err_state_prep;
+	online_state = err;
+	err = 0;
+#endif
 	entry = proc_create("profile", S_IWUSR | S_IRUGO,
 			    NULL, &proc_profile_operations);
 	if (!entry)
-		goto out;
+		goto err_state_onl;
 	proc_set_size(entry, (1 + prof_len) * sizeof(atomic_t));
-	__hotcpu_notifier(profile_cpu_callback, 0);
 
-out:
-	cpu_notifier_register_done();
+	return err;
+err_state_onl:
+#ifdef CONFIG_SMP
+	cpuhp_remove_state(online_state);
+err_state_prep:
+	cpuhp_remove_state(CPUHP_PROFILE_PREPARE);
+#endif
 	return err;
 }
 subsys_initcall(create_proc_profile);
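
Note the asymmetry in create_proc_profile(): for the statically numbered CPUHP_PROFILE_PREPARE state, cpuhp_setup_state() returns 0 on success, whereas for CPUHP_AP_ONLINE_DYN it returns the dynamically allocated state number, which is why the positive return value is stashed in online_state before err is cleared. A stripped-down sketch of the dynamic-allocation pattern, with invented foo_*() callbacks (not part of the diff):

#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

static enum cpuhp_state foo_online_state;

static int foo_online_cpu(unsigned int cpu)  { return 0; }
static int foo_offline_cpu(unsigned int cpu) { return 0; }

static int foo_init(void)
{
	int ret;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "AP_FOO_ONLINE",
				foo_online_cpu, foo_offline_cpu);
	if (ret < 0)
		return ret;
	/* A positive return value is the state number actually assigned. */
	foo_online_state = ret;
	return 0;
}

static void foo_exit(void)
{
	/* The saved state number is needed to tear the callbacks down again. */
	cpuhp_remove_state(foo_online_state);
}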
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index f433959..5d80925 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -1073,11 +1073,11 @@
  * offline to continue to use RCU for one jiffy after marking itself
  * offline in the cpu_online_mask.  This leniency is necessary given the
  * non-atomic nature of the online and offline processing, for example,
- * the fact that a CPU enters the scheduler after completing the CPU_DYING
- * notifiers.
+ * the fact that a CPU enters the scheduler after completing the teardown
+ * of the CPU.
  *
- * This is also why RCU internally marks CPUs online during the
- * CPU_UP_PREPARE phase and offline during the CPU_DEAD phase.
+ * This is also why RCU internally marks CPUs online during the
+ * preparation phase and offline after the CPU has been taken down.
  *
  * Disable checking if in an NMI handler because we cannot safely report
  * errors from NMI handlers anyway.
@@ -3806,12 +3806,58 @@
 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }
 
-static void rcu_prepare_cpu(int cpu)
+int rcutree_prepare_cpu(unsigned int cpu)
 {
 	struct rcu_state *rsp;
 
 	for_each_rcu_flavor(rsp)
 		rcu_init_percpu_data(cpu, rsp);
+
+	rcu_prepare_kthreads(cpu);
+	rcu_spawn_all_nocb_kthreads(cpu);
+
+	return 0;
+}
+
+static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
+{
+	struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
+
+	rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
+}
+
+int rcutree_online_cpu(unsigned int cpu)
+{
+	sync_sched_exp_online_cleanup(cpu);
+	rcutree_affinity_setting(cpu, -1);
+	return 0;
+}
+
+int rcutree_offline_cpu(unsigned int cpu)
+{
+	rcutree_affinity_setting(cpu, cpu);
+	return 0;
+}
+
+int rcutree_dying_cpu(unsigned int cpu)
+{
+	struct rcu_state *rsp;
+
+	for_each_rcu_flavor(rsp)
+		rcu_cleanup_dying_cpu(rsp);
+	return 0;
+}
+
+int rcutree_dead_cpu(unsigned int cpu)
+{
+	struct rcu_state *rsp;
+
+	for_each_rcu_flavor(rsp) {
+		rcu_cleanup_dead_cpu(cpu, rsp);
+		do_nocb_deferred_wakeup(per_cpu_ptr(rsp->rda, cpu));
+	}
+	return 0;
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -3851,52 +3897,6 @@
 }
 #endif
 
-/*
- * Handle CPU online/offline notification events.
- */
-int rcu_cpu_notify(struct notifier_block *self,
-		   unsigned long action, void *hcpu)
-{
-	long cpu = (long)hcpu;
-	struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
-	struct rcu_node *rnp = rdp->mynode;
-	struct rcu_state *rsp;
-
-	switch (action) {
-	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
-		rcu_prepare_cpu(cpu);
-		rcu_prepare_kthreads(cpu);
-		rcu_spawn_all_nocb_kthreads(cpu);
-		break;
-	case CPU_ONLINE:
-	case CPU_DOWN_FAILED:
-		sync_sched_exp_online_cleanup(cpu);
-		rcu_boost_kthread_setaffinity(rnp, -1);
-		break;
-	case CPU_DOWN_PREPARE:
-		rcu_boost_kthread_setaffinity(rnp, cpu);
-		break;
-	case CPU_DYING:
-	case CPU_DYING_FROZEN:
-		for_each_rcu_flavor(rsp)
-			rcu_cleanup_dying_cpu(rsp);
-		break;
-	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
-	case CPU_UP_CANCELED:
-	case CPU_UP_CANCELED_FROZEN:
-		for_each_rcu_flavor(rsp) {
-			rcu_cleanup_dead_cpu(cpu, rsp);
-			do_nocb_deferred_wakeup(per_cpu_ptr(rsp->rda, cpu));
-		}
-		break;
-	default:
-		break;
-	}
-	return NOTIFY_OK;
-}
-
 static int rcu_pm_notify(struct notifier_block *self,
 			 unsigned long action, void *hcpu)
 {
@@ -4208,10 +4208,9 @@
 	 * this is called early in boot, before either interrupts
 	 * or the scheduler are operational.
 	 */
-	cpu_notifier(rcu_cpu_notify, 0);
 	pm_notifier(rcu_pm_notify, 0);
 	for_each_online_cpu(cpu)
-		rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
+		rcutree_prepare_cpu(cpu);
 }
 
 #include "tree_exp.h"
diff --git a/kernel/smp.c b/kernel/smp.c
index 36552be..3aa642d 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -33,69 +33,54 @@
 
 static void flush_smp_call_function_queue(bool warn_cpu_offline);
 
-static int
-hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
+int smpcfd_prepare_cpu(unsigned int cpu)
 {
-	long cpu = (long)hcpu;
 	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
 
-	switch (action) {
-	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
-		if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
-				cpu_to_node(cpu)))
-			return notifier_from_errno(-ENOMEM);
-		cfd->csd = alloc_percpu(struct call_single_data);
-		if (!cfd->csd) {
-			free_cpumask_var(cfd->cpumask);
-			return notifier_from_errno(-ENOMEM);
-		}
-		break;
-
-#ifdef CONFIG_HOTPLUG_CPU
-	case CPU_UP_CANCELED:
-	case CPU_UP_CANCELED_FROZEN:
-		/* Fall-through to the CPU_DEAD[_FROZEN] case. */
-
-	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
+	if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
+				     cpu_to_node(cpu)))
+		return -ENOMEM;
+	cfd->csd = alloc_percpu(struct call_single_data);
+	if (!cfd->csd) {
 		free_cpumask_var(cfd->cpumask);
-		free_percpu(cfd->csd);
-		break;
+		return -ENOMEM;
+	}
 
-	case CPU_DYING:
-	case CPU_DYING_FROZEN:
-		/*
-		 * The IPIs for the smp-call-function callbacks queued by other
-		 * CPUs might arrive late, either due to hardware latencies or
-		 * because this CPU disabled interrupts (inside stop-machine)
-		 * before the IPIs were sent. So flush out any pending callbacks
-		 * explicitly (without waiting for the IPIs to arrive), to
-		 * ensure that the outgoing CPU doesn't go offline with work
-		 * still pending.
-		 */
-		flush_smp_call_function_queue(false);
-		break;
-#endif
-	};
-
-	return NOTIFY_OK;
+	return 0;
 }
 
-static struct notifier_block hotplug_cfd_notifier = {
-	.notifier_call		= hotplug_cfd,
-};
+int smpcfd_dead_cpu(unsigned int cpu)
+{
+	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
+
+	free_cpumask_var(cfd->cpumask);
+	free_percpu(cfd->csd);
+	return 0;
+}
+
+int smpcfd_dying_cpu(unsigned int cpu)
+{
+	/*
+	 * The IPIs for the smp-call-function callbacks queued by other
+	 * CPUs might arrive late, either due to hardware latencies or
+	 * because this CPU disabled interrupts (inside stop-machine)
+	 * before the IPIs were sent. So flush out any pending callbacks
+	 * explicitly (without waiting for the IPIs to arrive), to
+	 * ensure that the outgoing CPU doesn't go offline with work
+	 * still pending.
+	 */
+	flush_smp_call_function_queue(false);
+	return 0;
+}
 
 void __init call_function_init(void)
 {
-	void *cpu = (void *)(long)smp_processor_id();
 	int i;
 
 	for_each_possible_cpu(i)
 		init_llist_head(&per_cpu(call_single_queue, i));
 
-	hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
-	register_cpu_notifier(&hotplug_cfd_notifier);
+	smpcfd_prepare_cpu(smp_processor_id());
 }
 
 /*
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index d13c9ae..9ba7c82 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -1590,7 +1590,7 @@
 /*
  * Functions related to boot-time initialization:
  */
-static void init_hrtimers_cpu(int cpu)
+int hrtimers_prepare_cpu(unsigned int cpu)
 {
 	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
 	int i;
@@ -1602,6 +1602,7 @@
 
 	cpu_base->cpu = cpu;
 	hrtimer_init_hres(cpu_base);
+	return 0;
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -1636,7 +1637,7 @@
 	}
 }
 
-static void migrate_hrtimers(int scpu)
+int hrtimers_dead_cpu(unsigned int scpu)
 {
 	struct hrtimer_cpu_base *old_base, *new_base;
 	int i;
@@ -1665,45 +1666,14 @@
 	/* Check, if we got expired work to do */
 	__hrtimer_peek_ahead_timers();
 	local_irq_enable();
+	return 0;
 }
 
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static int hrtimer_cpu_notify(struct notifier_block *self,
-					unsigned long action, void *hcpu)
-{
-	int scpu = (long)hcpu;
-
-	switch (action) {
-
-	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
-		init_hrtimers_cpu(scpu);
-		break;
-
-#ifdef CONFIG_HOTPLUG_CPU
-	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
-		migrate_hrtimers(scpu);
-		break;
-#endif
-
-	default:
-		break;
-	}
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block hrtimers_nb = {
-	.notifier_call = hrtimer_cpu_notify,
-};
-
 void __init hrtimers_init(void)
 {
-	hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
-			  (void *)(long)smp_processor_id());
-	register_cpu_notifier(&hrtimers_nb);
+	hrtimers_prepare_cpu(smp_processor_id());
 }
 
 /**
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index cb9ab40..555670a 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1804,7 +1804,7 @@
 	}
 }
 
-static void migrate_timers(int cpu)
+int timers_dead_cpu(unsigned int cpu)
 {
 	struct timer_base *old_base;
 	struct timer_base *new_base;
@@ -1831,29 +1831,9 @@
 		spin_unlock_irq(&new_base->lock);
 		put_cpu_ptr(&timer_bases);
 	}
+	return 0;
 }
 
-static int timer_cpu_notify(struct notifier_block *self,
-				unsigned long action, void *hcpu)
-{
-	switch (action) {
-	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
-		migrate_timers((long)hcpu);
-		break;
-	default:
-		break;
-	}
-
-	return NOTIFY_OK;
-}
-
-static inline void timer_register_cpu_notifier(void)
-{
-	cpu_notifier(timer_cpu_notify, 0);
-}
-#else
-static inline void timer_register_cpu_notifier(void) { }
 #endif /* CONFIG_HOTPLUG_CPU */
 
 static void __init init_timer_cpu(int cpu)
@@ -1881,7 +1861,6 @@
 {
 	init_timer_cpus();
 	init_timer_stats();
-	timer_register_cpu_notifier();
 	open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
 }
 
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index d12bd95..ef071ca 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -4607,84 +4607,65 @@
 		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0);
 }
 
-/*
- * Workqueues should be brought up before normal priority CPU notifiers.
- * This will be registered high priority CPU notifier.
- */
-static int workqueue_cpu_up_callback(struct notifier_block *nfb,
-					       unsigned long action,
-					       void *hcpu)
+int workqueue_prepare_cpu(unsigned int cpu)
 {
-	int cpu = (unsigned long)hcpu;
+	struct worker_pool *pool;
+
+	for_each_cpu_worker_pool(pool, cpu) {
+		if (pool->nr_workers)
+			continue;
+		if (!create_worker(pool))
+			return -ENOMEM;
+	}
+	return 0;
+}
+
+int workqueue_online_cpu(unsigned int cpu)
+{
 	struct worker_pool *pool;
 	struct workqueue_struct *wq;
 	int pi;
 
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_UP_PREPARE:
-		for_each_cpu_worker_pool(pool, cpu) {
-			if (pool->nr_workers)
-				continue;
-			if (!create_worker(pool))
-				return NOTIFY_BAD;
-		}
-		break;
+	mutex_lock(&wq_pool_mutex);
 
-	case CPU_DOWN_FAILED:
-	case CPU_ONLINE:
-		mutex_lock(&wq_pool_mutex);
+	for_each_pool(pool, pi) {
+		mutex_lock(&pool->attach_mutex);
 
-		for_each_pool(pool, pi) {
-			mutex_lock(&pool->attach_mutex);
+		if (pool->cpu == cpu)
+			rebind_workers(pool);
+		else if (pool->cpu < 0)
+			restore_unbound_workers_cpumask(pool, cpu);
 
-			if (pool->cpu == cpu)
-				rebind_workers(pool);
-			else if (pool->cpu < 0)
-				restore_unbound_workers_cpumask(pool, cpu);
-
-			mutex_unlock(&pool->attach_mutex);
-		}
-
-		/* update NUMA affinity of unbound workqueues */
-		list_for_each_entry(wq, &workqueues, list)
-			wq_update_unbound_numa(wq, cpu, true);
-
-		mutex_unlock(&wq_pool_mutex);
-		break;
+		mutex_unlock(&pool->attach_mutex);
 	}
-	return NOTIFY_OK;
+
+	/* update NUMA affinity of unbound workqueues */
+	list_for_each_entry(wq, &workqueues, list)
+		wq_update_unbound_numa(wq, cpu, true);
+
+	mutex_unlock(&wq_pool_mutex);
+	return 0;
 }
 
-/*
- * Workqueues should be brought down after normal priority CPU notifiers.
- * This will be registered as low priority CPU notifier.
- */
-static int workqueue_cpu_down_callback(struct notifier_block *nfb,
-						 unsigned long action,
-						 void *hcpu)
+int workqueue_offline_cpu(unsigned int cpu)
 {
-	int cpu = (unsigned long)hcpu;
 	struct work_struct unbind_work;
 	struct workqueue_struct *wq;
 
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_DOWN_PREPARE:
-		/* unbinding per-cpu workers should happen on the local CPU */
-		INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
-		queue_work_on(cpu, system_highpri_wq, &unbind_work);
+	/* unbinding per-cpu workers should happen on the local CPU */
+	INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
+	queue_work_on(cpu, system_highpri_wq, &unbind_work);
 
-		/* update NUMA affinity of unbound workqueues */
-		mutex_lock(&wq_pool_mutex);
-		list_for_each_entry(wq, &workqueues, list)
-			wq_update_unbound_numa(wq, cpu, false);
-		mutex_unlock(&wq_pool_mutex);
+	/* update NUMA affinity of unbound workqueues */
+	mutex_lock(&wq_pool_mutex);
+	list_for_each_entry(wq, &workqueues, list)
+		wq_update_unbound_numa(wq, cpu, false);
+	mutex_unlock(&wq_pool_mutex);
 
-		/* wait for per-cpu unbinding to finish */
-		flush_work(&unbind_work);
-		destroy_work_on_stack(&unbind_work);
-		break;
-	}
-	return NOTIFY_OK;
+	/* wait for per-cpu unbinding to finish */
+	flush_work(&unbind_work);
+	destroy_work_on_stack(&unbind_work);
+	return 0;
 }
 
 #ifdef CONFIG_SMP
@@ -5486,9 +5467,6 @@
 
 	pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
 
-	cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
-	hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
-
 	wq_numa_init();
 
 	/* initialize CPU pools */
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index e2d5b6f..4fde8c7 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -405,26 +405,17 @@
 	return (u64)-1;
 }
 
-static int kvm_timer_cpu_notify(struct notifier_block *self,
-				unsigned long action, void *cpu)
+static int kvm_timer_starting_cpu(unsigned int cpu)
 {
-	switch (action) {
-	case CPU_STARTING:
-	case CPU_STARTING_FROZEN:
-		kvm_timer_init_interrupt(NULL);
-		break;
-	case CPU_DYING:
-	case CPU_DYING_FROZEN:
-		disable_percpu_irq(host_vtimer_irq);
-		break;
-	}
-
-	return NOTIFY_OK;
+	kvm_timer_init_interrupt(NULL);
+	return 0;
 }
 
-static struct notifier_block kvm_timer_cpu_nb = {
-	.notifier_call = kvm_timer_cpu_notify,
-};
+static int kvm_timer_dying_cpu(unsigned int cpu)
+{
+	disable_percpu_irq(host_vtimer_irq);
+	return 0;
+}
 
 int kvm_timer_hyp_init(void)
 {
@@ -449,12 +440,6 @@
 		goto out;
 	}
 
-	err = __register_cpu_notifier(&kvm_timer_cpu_nb);
-	if (err) {
-		kvm_err("Cannot register timer CPU notifier\n");
-		goto out_free;
-	}
-
 	wqueue = create_singlethread_workqueue("kvm_arch_timer");
 	if (!wqueue) {
 		err = -ENOMEM;
@@ -462,8 +447,10 @@
 	}
 
 	kvm_info("virtual timer IRQ%d\n", host_vtimer_irq);
-	on_each_cpu(kvm_timer_init_interrupt, NULL, 1);
 
+	cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING,
+			  "AP_KVM_ARM_TIMER_STARTING", kvm_timer_starting_cpu,
+			  kvm_timer_dying_cpu);
 	goto out;
 out_free:
 	free_percpu_irq(host_vtimer_irq, kvm_get_running_vcpus());
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index c3bfbb9..67cb5e9 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -2326,32 +2326,18 @@
 		return -ENXIO;
 }
 
-static void vgic_init_maintenance_interrupt(void *info)
+static int vgic_starting_cpu(unsigned int cpu)
 {
 	enable_percpu_irq(vgic->maint_irq, 0);
+	return 0;
 }
 
-static int vgic_cpu_notify(struct notifier_block *self,
-			   unsigned long action, void *cpu)
+static int vgic_dying_cpu(unsigned int cpu)
 {
-	switch (action) {
-	case CPU_STARTING:
-	case CPU_STARTING_FROZEN:
-		vgic_init_maintenance_interrupt(NULL);
-		break;
-	case CPU_DYING:
-	case CPU_DYING_FROZEN:
-		disable_percpu_irq(vgic->maint_irq);
-		break;
-	}
-
-	return NOTIFY_OK;
+	disable_percpu_irq(vgic->maint_irq);
+	return 0;
 }
 
-static struct notifier_block vgic_cpu_nb = {
-	.notifier_call = vgic_cpu_notify,
-};
-
 static int kvm_vgic_probe(void)
 {
 	const struct gic_kvm_info *gic_kvm_info;
@@ -2392,19 +2378,10 @@
 		return ret;
 	}
 
-	ret = __register_cpu_notifier(&vgic_cpu_nb);
-	if (ret) {
-		kvm_err("Cannot register vgic CPU notifier\n");
-		goto out_free_irq;
-	}
-
-	on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
-
+	cpuhp_setup_state(CPUHP_AP_KVM_ARM_VGIC_STARTING,
+			  "AP_KVM_ARM_VGIC_STARTING", vgic_starting_cpu,
+			  vgic_dying_cpu);
 	return 0;
-
-out_free_irq:
-	free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus());
-	return ret;
 }
 
 int kvm_irq_map_gsi(struct kvm *kvm,
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index a1442f7..2c7f0d5 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -353,32 +353,19 @@
 
 /* GENERIC PROBE */
 
-static void vgic_init_maintenance_interrupt(void *info)
+static int vgic_init_cpu_starting(unsigned int cpu)
 {
 	enable_percpu_irq(kvm_vgic_global_state.maint_irq, 0);
+	return 0;
 }
 
-static int vgic_cpu_notify(struct notifier_block *self,
-			   unsigned long action, void *cpu)
+
+static int vgic_init_cpu_dying(unsigned int cpu)
 {
-	switch (action) {
-	case CPU_STARTING:
-	case CPU_STARTING_FROZEN:
-		vgic_init_maintenance_interrupt(NULL);
-		break;
-	case CPU_DYING:
-	case CPU_DYING_FROZEN:
-		disable_percpu_irq(kvm_vgic_global_state.maint_irq);
-		break;
-	}
-
-	return NOTIFY_OK;
+	disable_percpu_irq(kvm_vgic_global_state.maint_irq);
+	return 0;
 }
 
-static struct notifier_block vgic_cpu_nb = {
-	.notifier_call = vgic_cpu_notify,
-};
-
 static irqreturn_t vgic_maintenance_handler(int irq, void *data)
 {
 	/*
@@ -434,14 +421,14 @@
 		return ret;
 	}
 
-	ret = __register_cpu_notifier(&vgic_cpu_nb);
+	ret = cpuhp_setup_state(CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING,
+				"AP_KVM_ARM_VGIC_INIT_STARTING",
+				vgic_init_cpu_starting, vgic_init_cpu_dying);
 	if (ret) {
 		kvm_err("Cannot register vgic CPU notifier\n");
 		goto out_free_irq;
 	}
 
-	on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
-
 	kvm_info("vgic interrupt IRQ%d\n", kvm_vgic_global_state.maint_irq);
 	return 0;
 
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index ce3d8e5..2e79136 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -3155,12 +3155,13 @@
 	}
 }
 
-static void hardware_enable(void)
+static int kvm_starting_cpu(unsigned int cpu)
 {
 	raw_spin_lock(&kvm_count_lock);
 	if (kvm_usage_count)
 		hardware_enable_nolock(NULL);
 	raw_spin_unlock(&kvm_count_lock);
+	return 0;
 }
 
 static void hardware_disable_nolock(void *junk)
@@ -3173,12 +3174,13 @@
 	kvm_arch_hardware_disable();
 }
 
-static void hardware_disable(void)
+static int kvm_dying_cpu(unsigned int cpu)
 {
 	raw_spin_lock(&kvm_count_lock);
 	if (kvm_usage_count)
 		hardware_disable_nolock(NULL);
 	raw_spin_unlock(&kvm_count_lock);
+	return 0;
 }
 
 static void hardware_disable_all_nolock(void)
@@ -3219,21 +3221,6 @@
 	return r;
 }
 
-static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
-			   void *v)
-{
-	val &= ~CPU_TASKS_FROZEN;
-	switch (val) {
-	case CPU_DYING:
-		hardware_disable();
-		break;
-	case CPU_STARTING:
-		hardware_enable();
-		break;
-	}
-	return NOTIFY_OK;
-}
-
 static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
 		      void *v)
 {
@@ -3500,10 +3487,6 @@
 	return r;
 }
 
-static struct notifier_block kvm_cpu_notifier = {
-	.notifier_call = kvm_cpu_hotplug,
-};
-
 static int kvm_debugfs_open(struct inode *inode, struct file *file,
 			   int (*get)(void *, u64 *), int (*set)(void *, u64),
 			   const char *fmt)
@@ -3754,7 +3737,8 @@
 			goto out_free_1;
 	}
 
-	r = register_cpu_notifier(&kvm_cpu_notifier);
+	r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_STARTING, "AP_KVM_STARTING",
+				      kvm_starting_cpu, kvm_dying_cpu);
 	if (r)
 		goto out_free_2;
 	register_reboot_notifier(&kvm_reboot_notifier);
@@ -3808,7 +3792,7 @@
 	kmem_cache_destroy(kvm_vcpu_cache);
 out_free_3:
 	unregister_reboot_notifier(&kvm_reboot_notifier);
-	unregister_cpu_notifier(&kvm_cpu_notifier);
+	cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING);
 out_free_2:
 out_free_1:
 	kvm_arch_hardware_unsetup();
@@ -3831,7 +3815,7 @@
 	kvm_async_pf_deinit();
 	unregister_syscore_ops(&kvm_syscore_ops);
 	unregister_reboot_notifier(&kvm_reboot_notifier);
-	unregister_cpu_notifier(&kvm_cpu_notifier);
+	cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING);
 	on_each_cpu(hardware_disable_nolock, NULL, 1);
 	kvm_arch_hardware_unsetup();
 	kvm_arch_exit();