From: Hu Zhaodong
ohos inclusion
category: feature
issue: #I4TNS2
CVE: NA
Signed-off-by: Hu Zhaodong
-------------------------------------------
allow migrating a running RT task in the clock interrupt
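
When CONFIG_SCHED_RT_ACTIVE_LB is enabled, the RT sched_class gains a
.check_for_migration hook (check_for_migration_rt), intended to be driven
from the scheduler tick by the existing SCHED_EAS check_for_migration
machinery (the caller is not part of this patch). The hook returns early
if the feature is disabled via sysctl, if the task is pinned to a single
CPU, or if the current CPU already has the maximum capacity in the root
domain. Otherwise it decides whether the running RT task is misfit, using
rt_task_fits_capacity() (or the RTG target capacity when CONFIG_SCHED_RTG
is set). For a misfit task it marks the rq, takes a reference on the task
and queues rt_active_load_balance_cpu_stop() via stop_one_cpu_nowait();
the stopper uses find_lock_lowest_rq() and migrates the task only to a
CPU with strictly higher original capacity.

The balancing is controlled by the new sysctl sched_enable_rt_active_lb
(default 1). For example, assuming procfs is mounted at /proc, it can be
disabled and re-enabled at run time with:

  echo 0 > /proc/sys/kernel/sched_enable_rt_active_lb
  echo 1 > /proc/sys/kernel/sched_enable_rt_active_lb
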
Signed-off-by: gaochao
---
include/linux/sched/sysctl.h | 4 ++
init/Kconfig | 7 +++
kernel/sched/rt.c | 90 ++++++++++++++++++++++++++++++++++++
kernel/sched/sched.h | 7 +++
kernel/sysctl.c | 9 ++++
5 files changed, 117 insertions(+)
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 210909cd4141..acec3b1fd469 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -41,6 +41,10 @@ sysctl_sched_walt_init_task_load_pct_sysctl_handler(struct ctl_table *table,
int write, void __user *buffer, size_t *length, loff_t *ppos);
#endif
+#ifdef CONFIG_SCHED_RT_ACTIVE_LB
+extern unsigned int sysctl_sched_enable_rt_active_lb;
+#endif
+
enum sched_tunable_scaling {
SCHED_TUNABLESCALING_NONE,
SCHED_TUNABLESCALING_LOG,
diff --git a/init/Kconfig b/init/Kconfig
index db7449e779c6..57554d795040 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -866,6 +866,13 @@ config SCHED_EAS
help
Check and migrate the CFS process to a more suitable CPU in the tick.
+config SCHED_RT_ACTIVE_LB
+ bool "RT Capacity Aware Misfit Task"
+ depends on SCHED_EAS
+ default n
+ help
+ Check and migrate the RT process to a more suitable CPU in the tick.
+
endmenu
#
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 6c1475950441..9adcbf0e0bee 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -17,6 +17,10 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
struct rt_bandwidth def_rt_bandwidth;
+#ifdef CONFIG_SCHED_RT_ACTIVE_LB
+unsigned int sysctl_sched_enable_rt_active_lb = 1;
+#endif
+
static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
struct rt_bandwidth *rt_b =
@@ -2443,6 +2447,89 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
}
}
+#ifdef CONFIG_SCHED_RT_ACTIVE_LB
+static int rt_active_load_balance_cpu_stop(void *data)
+{
+ struct rq *busiest_rq = data;
+ struct task_struct *next_task = busiest_rq->rt_push_task;
+ struct rq *lowest_rq = NULL;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&busiest_rq->lock, flags);
+ busiest_rq->rt_active_balance = 0;
+
+ /* find_lock_lowest_rq locks the rq if found */
+ lowest_rq = find_lock_lowest_rq(next_task, busiest_rq);
+ if (!lowest_rq)
+ goto out;
+
+ if (capacity_orig_of(cpu_of(lowest_rq)) <= capacity_orig_of(task_cpu(next_task)))
+ goto unlock;
+
+ deactivate_task(busiest_rq, next_task, 0);
+ set_task_cpu(next_task, lowest_rq->cpu);
+ activate_task(lowest_rq, next_task, 0);
+
+ resched_curr(lowest_rq);
+unlock:
+ double_unlock_balance(busiest_rq, lowest_rq);
+out:
+ put_task_struct(next_task);
+ raw_spin_unlock_irqrestore(&busiest_rq->lock, flags);
+
+ return 0;
+}
+
+void check_for_migration_rt(struct rq *rq, struct task_struct *p)
+{
+ bool need_active_lb = false;
+ bool misfit_task = false;
+ int cpu = task_cpu(p);
+ unsigned long cpu_orig_cap;
+#ifdef CONFIG_SCHED_RTG
+ struct cpumask *rtg_target = NULL;
+#endif
+
+ if (!sysctl_sched_enable_rt_active_lb)
+ return;
+
+ if (p->nr_cpus_allowed == 1)
+ return;
+
+ cpu_orig_cap = capacity_orig_of(cpu);
+ /* cpu has max capacity, no need to balance */
+ if (cpu_orig_cap == rq->rd->max_cpu_capacity)
+ return;
+
+#ifdef CONFIG_SCHED_RTG
+ rtg_target = find_rtg_target(p);
+ if (rtg_target)
+ misfit_task = capacity_orig_of(cpumask_first(rtg_target)) >
+ cpu_orig_cap;
+ else
+ misfit_task = !rt_task_fits_capacity(p, cpu);
+#else
+ misfit_task = !rt_task_fits_capacity(p, cpu);
+#endif
+
+ if (misfit_task) {
+ raw_spin_lock(&rq->lock);
+ if (!rq->active_balance && !rq->rt_active_balance) {
+ rq->rt_active_balance = 1;
+ rq->rt_push_task = p;
+ get_task_struct(p);
+ need_active_lb = true;
+ }
+ raw_spin_unlock(&rq->lock);
+
+ if (need_active_lb)
+ stop_one_cpu_nowait(task_cpu(p),
+ rt_active_load_balance_cpu_stop,
+ rq, &rq->rt_active_balance_work);
+ }
+}
+#endif
+
static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
{
/*
@@ -2491,6 +2578,9 @@ const struct sched_class rt_sched_class
#ifdef CONFIG_SCHED_WALT
.fixup_walt_sched_stats = fixup_walt_sched_stats_common,
#endif
+#ifdef CONFIG_SCHED_RT_ACTIVE_LB
+ .check_for_migration = check_for_migration_rt,
+#endif
};
#ifdef CONFIG_RT_GROUP_SCHED
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 1a4f1806eb78..09ad491bed45 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1052,6 +1052,13 @@ struct rq {
#endif
struct cpu_stop_work active_balance_work;
+ /* For rt active balancing */
+#ifdef CONFIG_SCHED_RT_ACTIVE_LB
+ int rt_active_balance;
+ struct task_struct *rt_push_task;
+ struct cpu_stop_work rt_active_balance_work;
+#endif
+
/* CPU of this runqueue: */
int cpu;
int online;
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index f13b9e456f50..d5fef7aba276 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1659,6 +1659,15 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
+#ifdef CONFIG_SCHED_RT_ACTIVE_LB
+ {
+ .procname = "sched_enable_rt_active_lb",
+ .data = &sysctl_sched_enable_rt_active_lb,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+#endif
#ifdef CONFIG_SCHED_WALT
{
.procname = "sched_use_walt_cpu_util",
--
2.25.1