From: Srinath Sridharan
ohos inclusion
category: feature
issue: #I4TNS2
CVE: NA
Signed-off-by: Hu Zhaodong
-------------------------------------------
EAS scheduler optimization

Allow CFS task migration to be triggered from the scheduler tick
(clock interrupt): when the running task no longer fits its current
CPU, kick an active balance to push it to a more suitable one.
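
In outline, the path added here is (simplified call flow; the real
code is in the diff below):

  scheduler_tick()
    -> curr->sched_class->check_for_migration()   /* check_for_migration_fair() */
         -> find_rtg_cpu() / find_energy_efficient_cpu()
         -> kick_active_balance()         /* arm rq->push_task / rq->push_cpu */
         -> stop_one_cpu_nowait(active_load_balance_cpu_stop)
              -> detach_task() + attach_one_task()  /* the migration itself */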
Signed-off-by: Satya Durga Srinivasu Prabhala
Signed-off-by: Vikram Mulukutla
Signed-off-by: Srinath Sridharan
---
init/Kconfig | 6 ++
kernel/sched/core.c | 35 +++++++++++
kernel/sched/fair.c | 141 ++++++++++++++++++++++++++++++++++++++++++-
kernel/sched/sched.h | 7 +++
4 files changed, 188 insertions(+), 1 deletion(-)
diff --git a/init/Kconfig b/init/Kconfig
index 1d248e9c5a89..db7449e779c6 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -860,6 +860,12 @@ config UCLAMP_BUCKETS_COUNT
source "kernel/sched/rtg/Kconfig"
+config SCHED_EAS
+ bool "EAS scheduler optimization"
+ default n
+ help
+ Check in the scheduler tick whether the running CFS task should migrate to a more suitable CPU, and trigger an active migration if so.
+
endmenu
#
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 8e506f6efc73..471b2129ea84 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4134,6 +4134,11 @@ void scheduler_tick(void)
#ifdef CONFIG_SMP
rq->idle_balance = idle_cpu(cpu);
trigger_load_balance(rq);
+
+#ifdef CONFIG_SCHED_EAS
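+ /* Let the running task's class check for a better-suited CPU. */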
+ if (curr->sched_class->check_for_migration)
+ curr->sched_class->check_for_migration(rq, curr);
+#endif
#endif
}
@@ -7025,6 +7030,32 @@ void migrate_tasks(struct rq *dead_rq, struct rq_flags *rf,
attach_tasks_core(&tasks, rq);
}
+#ifdef CONFIG_SCHED_EAS
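+/*
+ * Drop any pending EAS push request on @cpu: release the reserved
+ * target CPU and the reference held on the task to be pushed.
+ */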
+static void clear_eas_migration_request(int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+ unsigned long flags;
+
+ clear_reserved(cpu);
+ if (rq->push_task) {
+ struct task_struct *push_task = NULL;
+
+ raw_spin_lock_irqsave(&rq->lock, flags);
+ if (rq->push_task) {
+ clear_reserved(rq->push_cpu);
+ push_task = rq->push_task;
+ rq->push_task = NULL;
+ }
+ rq->active_balance = 0;
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
+ if (push_task)
+ put_task_struct(push_task);
+ }
+}
+#else
+static inline void clear_eas_migration_request(int cpu) {}
+#endif
+
#ifdef CONFIG_CPU_ISOLATION_OPT
int do_isolation_work_cpu_stop(void *data)
{
@@ -7058,6 +7089,7 @@ int do_isolation_work_cpu_stop(void *data)
set_rq_online(rq);
rq_unlock(rq, &rf);
+ clear_eas_migration_request(cpu);
local_irq_enable();
return 0;
}
@@ -7425,6 +7457,7 @@ int sched_cpu_starting(unsigned int cpu)
{
sched_rq_cpu_starting(cpu);
sched_tick_start(cpu);
+ clear_eas_migration_request(cpu);
return 0;
}
@@ -7447,6 +7480,8 @@ int sched_cpu_dying(unsigned int cpu)
BUG_ON(rq->nr_running != 1);
rq_unlock_irqrestore(rq, &rf);
+ clear_eas_migration_request(cpu);
+
calc_load_migrate(rq);
update_max_interval();
nohz_balance_exit_idle(rq);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3b8d6c1dfc30..9789a385fecd 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -10110,9 +10110,13 @@ static int active_load_balance_cpu_stop(void *data)
int busiest_cpu = cpu_of(busiest_rq);
int target_cpu = busiest_rq->push_cpu;
struct rq *target_rq = cpu_rq(target_cpu);
- struct sched_domain *sd;
+ struct sched_domain *sd = NULL;
struct task_struct *p = NULL;
struct rq_flags rf;
+#ifdef CONFIG_SCHED_EAS
+ struct task_struct *push_task;
+ int push_task_detached = 0;
+#endif
rq_lock_irq(busiest_rq, &rf);
/*
@@ -10139,6 +10143,32 @@ static int active_load_balance_cpu_stop(void *data)
*/
BUG_ON(busiest_rq == target_rq);
+#ifdef CONFIG_SCHED_EAS
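+ /*
+ * An EAS push request takes priority over regular load balancing:
+ * detach the pushed task here and attach it to the target CPU once
+ * the busiest_rq lock has been dropped (see out_unlock below).
+ */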
+ push_task = busiest_rq->push_task;
+ target_cpu = busiest_rq->push_cpu;
+ if (push_task) {
+ struct lb_env env = {
+ .sd = sd,
+ .dst_cpu = target_cpu,
+ .dst_rq = target_rq,
+ .src_cpu = busiest_rq->cpu,
+ .src_rq = busiest_rq,
+ .idle = CPU_IDLE,
+ .flags = 0,
+ .loop = 0,
+ };
+ if (task_on_rq_queued(push_task) &&
+ push_task->state == TASK_RUNNING &&
+ task_cpu(push_task) == busiest_cpu &&
+ cpu_online(target_cpu)) {
+ update_rq_clock(busiest_rq);
+ detach_task(push_task, &env);
+ push_task_detached = 1;
+ }
+ goto out_unlock;
+ }
+#endif
+
/* Search for an sd spanning us and the target CPU. */
rcu_read_lock();
for_each_domain(target_cpu, sd) {
@@ -10178,8 +10208,23 @@ static int active_load_balance_cpu_stop(void *data)
rcu_read_unlock();
out_unlock:
busiest_rq->active_balance = 0;
+
+#ifdef CONFIG_SCHED_EAS
+ push_task = busiest_rq->push_task;
+ if (push_task)
+ busiest_rq->push_task = NULL;
+#endif
rq_unlock(busiest_rq, &rf);
+#ifdef CONFIG_SCHED_EAS
+ if (push_task) {
+ if (push_task_detached)
+ attach_one_task(target_rq, push_task);
+
+ put_task_struct(push_task);
+ }
+#endif
+
if (p)
attach_one_task(target_rq, p);
@@ -10979,6 +11024,97 @@ static void rq_offline_fair(struct rq *rq)
unthrottle_offline_cfs_rqs(rq);
}
+#ifdef CONFIG_SCHED_EAS
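+/*
+ * Arm an active-balance push of the running task @p from @rq to
+ * @new_cpu. Returns 1 if the request was armed, 0 if an active
+ * balance is already pending on this rq.
+ */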
+static inline int
+kick_active_balance(struct rq *rq, struct task_struct *p, int new_cpu)
+{
+ unsigned long flags;
+ int rc = 0;
+
+ if (cpu_of(rq) == new_cpu)
+ return rc;
+
+ /* Invoke active balance to force migrate currently running task */
+ raw_spin_lock_irqsave(&rq->lock, flags);
+ if (!rq->active_balance) {
+ rq->active_balance = 1;
+ rq->push_cpu = new_cpu;
+ get_task_struct(p);
+ rq->push_task = p;
+ rc = 1;
+ }
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
+ return rc;
+}
+
+DEFINE_RAW_SPINLOCK(migration_lock);
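+/*
+ * Tick-path hook: if the running CFS task misfits its current CPU,
+ * look for a more capable (or RTG-preferred) CPU and kick an active
+ * migration to it. migration_lock serialises these attempts.
+ */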
+void check_for_migration_fair(struct rq *rq, struct task_struct *p)
+{
+ int active_balance;
+ int new_cpu = -1;
+ int prev_cpu = task_cpu(p);
+ int ret;
+
+#ifdef CONFIG_SCHED_RTG
+ bool need_down_migrate = false;
+ struct cpumask *rtg_target = find_rtg_target(p);
+
+ if (rtg_target &&
+ (capacity_orig_of(prev_cpu) >
+ capacity_orig_of(cpumask_first(rtg_target))))
+ need_down_migrate = true;
+#endif
+
+ if (rq->misfit_task_load) {
+ if (rq->curr->state != TASK_RUNNING ||
+ rq->curr->nr_cpus_allowed == 1)
+ return;
+
+ raw_spin_lock(&migration_lock);
+#ifdef CONFIG_SCHED_RTG
+ if (rtg_target) {
+ new_cpu = find_rtg_cpu(p);
+
+ if (new_cpu != -1 && need_down_migrate &&
+ cpumask_test_cpu(new_cpu, rtg_target) &&
+ idle_cpu(new_cpu))
+ goto do_active_balance;
+
+ if (new_cpu != -1 &&
+ capacity_orig_of(new_cpu) > capacity_orig_of(prev_cpu))
+ goto do_active_balance;
+
+ goto out_unlock;
+ }
+#endif
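+ /* No RTG target: let EAS look for a more energy-efficient CPU. */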
+ rcu_read_lock();
+ new_cpu = find_energy_efficient_cpu(p, prev_cpu);
+ rcu_read_unlock();
+
+ if (new_cpu == -1 ||
+ capacity_orig_of(new_cpu) <= capacity_orig_of(prev_cpu))
+ goto out_unlock;
+#ifdef CONFIG_SCHED_RTG
+do_active_balance:
+#endif
+ active_balance = kick_active_balance(rq, p, new_cpu);
+ if (active_balance) {
+ mark_reserved(new_cpu);
+ raw_spin_unlock(&migration_lock);
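+ /*
+ * Run the push on the source CPU's stopper thread; if it
+ * cannot be kicked, drop the destination reservation.
+ */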
+ ret = stop_one_cpu_nowait(prev_cpu,
+ active_load_balance_cpu_stop, rq,
+ &rq->active_balance_work);
+ if (!ret)
+ clear_reserved(new_cpu);
+ else
+ wake_up_if_idle(new_cpu);
+ return;
+ }
+out_unlock:
+ raw_spin_unlock(&migration_lock);
+ }
+}
+#endif /* CONFIG_SCHED_EAS */
#endif /* CONFIG_SMP */
/*
@@ -11530,6 +11666,9 @@ const struct sched_class fair_sched_class
#ifdef CONFIG_SCHED_WALT
.fixup_walt_sched_stats = walt_fixup_sched_stats_fair,
#endif
+#ifdef CONFIG_SCHED_EAS
+ .check_for_migration = check_for_migration_fair,
+#endif
};
#ifdef CONFIG_SCHED_DEBUG
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index d79744dcc048..1a4f1806eb78 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1047,6 +1047,9 @@ struct rq {
/* For active balancing */
int active_balance;
int push_cpu;
+#ifdef CONFIG_SCHED_EAS
+ struct task_struct *push_task;
+#endif
struct cpu_stop_work active_balance_work;
/* CPU of this runqueue: */
@@ -1925,6 +1928,9 @@ struct sched_class {
void (*fixup_walt_sched_stats)(struct rq *rq, struct task_struct *p,
u16 updated_demand_scaled);
#endif
+#ifdef CONFIG_SCHED_EAS
+ void (*check_for_migration)(struct rq *rq, struct task_struct *p);
+#endif
} __aligned(STRUCT_ALIGNMENT); /* STRUCT_ALIGN(), vmlinux.lds.h */
static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
@@ -2768,6 +2774,7 @@ extern bool task_fits_max(struct task_struct *p, int cpu);
extern unsigned long capacity_spare_without(int cpu, struct task_struct *p);
extern int update_preferred_cluster(struct related_thread_group *grp,
struct task_struct *p, u32 old_load, bool from_tick);
+extern struct cpumask *find_rtg_target(struct task_struct *p);
#endif
#ifdef CONFIG_SCHED_WALT
--
2.25.1