cpu-boost: Rework scheduling setup

This patch minimizes the latency of an input boost by creating a
dedicated kworker and scheduling it with the realtime policy
SCHED_FIFO.

The worker runs at priority MAX_RT_PRIO - 2 so that the input boost
task can preempt userspace RT tasks if needed, while still being
preempted by kernel-internal RT tasks, which run at MAX_RT_PRIO - 1,
the highest RT priority.

Since the cpu-boost workqueue would now only handle the work that
disables a running input boost, which is not latency critical, drop
the dedicated workqueue and move that work to the shared system
workqueue.
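
The removal side therefore only needs the shared system workqueue;
roughly (again with placeholder names, boost_ms standing in for the
driver's input_boost_ms parameter):

  #include <linux/workqueue.h>
  #include <linux/jiffies.h>

  static struct delayed_work boost_rem_work;

  /* drops the boosted frequency floor; not latency critical */
  static void boost_rem_fn(struct work_struct *work)
  {
  }

  static void boost_arm_removal(unsigned int boost_ms)
  {
          /* shared system workqueue, no dedicated cpu-boost wq */
          schedule_delayed_work(&boost_rem_work,
                                msecs_to_jiffies(boost_ms));
  }

  /* in init: INIT_DELAYED_WORK(&boost_rem_work, boost_rem_fn); */
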
[Changed kthread method names for 4.9 kernel.]
Change-Id: I846cc5d6f7143cff2f3318aefba5f2d9ed65b86d
diff --git a/drivers/cpufreq/cpu-boost.c b/drivers/cpufreq/cpu-boost.c
index 6a4008c..a1ce3b9 100644
--- a/drivers/cpufreq/cpu-boost.c
+++ b/drivers/cpufreq/cpu-boost.c
@@ -1,5 +1,7 @@
/*
* Copyright (c) 2013-2015,2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017, Paranoid Android.
+ * Copyright (C) 2017, Razer Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -17,12 +19,15 @@
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
+#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/time.h>
+#include <linux/sched/rt.h>
+
struct cpu_sync {
int cpu;
unsigned int input_boost_min;
@@ -30,9 +35,8 @@ struct cpu_sync {
};
static DEFINE_PER_CPU(struct cpu_sync, sync_info);
-static struct workqueue_struct *cpu_boost_wq;
-static struct work_struct input_boost_work;
+static struct kthread_work input_boost_work;
static bool input_boost_enabled;
@@ -46,6 +50,12 @@ static bool sched_boost_active;
static struct delayed_work input_boost_rem;
static u64 last_input_time;
+
+// alex.naidis@paranoidandroid.co Rework scheduling setup - start
+static struct kthread_worker cpu_boost_worker;
+static struct task_struct *cpu_boost_worker_thread;
+// alex.naidis@paranoidandroid.co Rework scheduling setup - end
+
#define MIN_INPUT_INTERVAL (150 * USEC_PER_MSEC)
static int set_input_boost_freq(const char *buf, const struct kernel_param *kp)
@@ -187,7 +197,7 @@ static void do_input_boost_rem(struct work_struct *work)
}
}
-static void do_input_boost(struct work_struct *work)
+static void do_input_boost(struct kthread_work *work)
{
unsigned int i, ret;
struct cpu_sync *i_sync_info;
@@ -217,8 +227,7 @@ static void do_input_boost(struct work_struct *work)
sched_boost_active = true;
}
- queue_delayed_work(cpu_boost_wq, &input_boost_rem,
- msecs_to_jiffies(input_boost_ms));
+ schedule_delayed_work(&input_boost_rem, msecs_to_jiffies(input_boost_ms));
}
static void cpuboost_input_event(struct input_handle *handle,
@@ -233,10 +242,12 @@ static void cpuboost_input_event(struct input_handle *handle,
if (now - last_input_time < MIN_INPUT_INTERVAL)
return;
- if (work_pending(&input_boost_work))
+ if (queuing_blocked(&cpu_boost_worker, &input_boost_work))
return;
- queue_work(cpu_boost_wq, &input_boost_work);
+ // alex.naidis@paranoidandroid.co Rework scheduling setup - start
+ kthread_queue_work(&cpu_boost_worker, &input_boost_work);
+ // alex.naidis@paranoidandroid.co Rework scheduling setup - end
last_input_time = ktime_to_us(ktime_get());
}
@@ -315,12 +326,19 @@ static int cpu_boost_init(void)
{
int cpu, ret;
struct cpu_sync *s;
+ // alex.naidis@paranoidandroid.co Rework scheduling setup - start
+ struct sched_param param = { .sched_priority = MAX_RT_PRIO - 2 };
- cpu_boost_wq = alloc_workqueue("cpuboost_wq", WQ_HIGHPRI, 0);
- if (!cpu_boost_wq)
+ kthread_init_worker(&cpu_boost_worker);
+ cpu_boost_worker_thread = kthread_run(kthread_worker_fn,
+ &cpu_boost_worker, "cpu_boost_worker_thread");
+ if (IS_ERR(cpu_boost_worker_thread))
return -EFAULT;
- INIT_WORK(&input_boost_work, do_input_boost);
+ sched_setscheduler(cpu_boost_worker_thread, SCHED_FIFO, &param);
+
+ kthread_init_work(&input_boost_work, do_input_boost);
+ // alex.naidis@paranoidandroid.co Rework scheduling setup - end
INIT_DELAYED_WORK(&input_boost_rem, do_input_boost_rem);
for_each_possible_cpu(cpu) {
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index a6e82a6..7c43451 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -168,6 +168,19 @@ extern void __kthread_init_worker(struct kthread_worker *worker,
TIMER_IRQSAFE); \
} while (0)
+/*
+ * Returns true when the work could not be queued at the moment.
+ * It happens when it is already pending in a worker list
+ * or when it is being cancelled.
+ */
+static inline bool queuing_blocked(struct kthread_worker *worker,
+ struct kthread_work *work)
+{
+ lockdep_assert_held(&worker->lock);
+
+ return !list_empty(&work->node) || work->canceling;
+}
+
int kthread_worker_fn(void *worker_ptr);
__printf(2, 3)
diff --git a/kernel/kthread.c b/kernel/kthread.c
index c421630..f06ef0d6 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -736,19 +736,6 @@ kthread_create_worker_on_cpu(int cpu, unsigned int flags,
}
EXPORT_SYMBOL(kthread_create_worker_on_cpu);
-/*
- * Returns true when the work could not be queued at the moment.
- * It happens when it is already pending in a worker list
- * or when it is being cancelled.
- */
-static inline bool queuing_blocked(struct kthread_worker *worker,
- struct kthread_work *work)
-{
- lockdep_assert_held(&worker->lock);
-
- return !list_empty(&work->node) || work->canceling;
-}
-
static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
struct kthread_work *work)
{