author     Con Kolivas <kernel@kolivas.org>   2016-11-05 08:51:39 +1100
committer  Con Kolivas <kernel@kolivas.org>   2016-11-05 09:01:05 +1100
commit     dfbb56f20f68df2ca745154e3cf25735cc283d05 (patch)
tree       884e11bcbc037b9c52ee7c2db0698e1cb610031f
parent     8d883c1c15f1aa3df45c3e8e0b439dab7211f3b6 (diff)
Kick out of the idle loop if there are softirqs pending, avoiding the nohz idle calls, and service them on exit from schedule().
-rw-r--r--  kernel/sched/MuQSS.c   15
-rw-r--r--  kernel/sched/MuQSS.h   15
-rw-r--r--  kernel/sched/idle.c    10
-rw-r--r--  kernel/sched/sched.h   12
4 files changed, 36 insertions, 16 deletions
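
Before the per-file hunks, a minimal sketch of the idle-loop flow this commit produces, condensed from the changes below. It is illustrative kernel-style C only, not part of the commit: idle_flow_sketch() is a made-up name, softirq_pending() is the new MuQSS.h helper, and the trailing schedule_preempt_disabled() stands for the existing tail of cpu_idle_loop() that the hunks do not show.

        /* Sketch only: one pass of cpu_idle_loop() after this patch. */
        static void idle_flow_sketch(int cpu)
        {
                bool pending = false;

                if (unlikely(softirq_pending(cpu)))   /* sets need_resched on the idle task */
                        pending = true;
                else
                        tick_nohz_idle_enter();       /* only enter nohz when genuinely idle */

                while (!need_resched())
                        ;                             /* arch/cpuidle sleep, unchanged */

                preempt_set_need_resched();
                if (!pending)
                        tick_nohz_idle_exit();        /* skipped when we never entered nohz */

                /*
                 * schedule() runs next.  If it picks the idle task again, the
                 * new else-branch in __schedule() calls do_pending_softirq(),
                 * which runs the pending work via do_softirq_own_stack()
                 * before interrupts are re-enabled.
                 */
                schedule_preempt_disabled();
        }

The net effect is that a softirq raised against an idling CPU is not left stranded in nohz state; the CPU bails out of the idle loop immediately and the work is serviced on its next pass through schedule().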
diff --git a/kernel/sched/MuQSS.c b/kernel/sched/MuQSS.c
index 33f649a77a66..17afdf88cdab 100644
--- a/kernel/sched/MuQSS.c
+++ b/kernel/sched/MuQSS.c
@@ -752,6 +752,13 @@ static inline bool task_queued(struct task_struct *p)
static void enqueue_task(struct rq *rq, struct task_struct *p, int flags);
static inline void resched_if_idle(struct rq *rq);
+/* Dodgy workaround till we figure out where the softirqs are going */
+static inline void do_pending_softirq(struct rq *rq, struct task_struct *next)
+{
+ if (unlikely(next == rq->idle && local_softirq_pending() && !in_interrupt()))
+ do_softirq_own_stack();
+}
+
static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
@@ -808,9 +815,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
#endif
rq_unlock(rq);
- /* Dodgy workaround till we figure out where the softirqs are going */
- if (unlikely(current == rq->idle && local_softirq_pending() && !in_interrupt()))
- do_softirq_own_stack();
+ do_pending_softirq(rq, current);
local_irq_enable();
}
@@ -3852,7 +3857,9 @@ static void __sched notrace __schedule(bool preempt)
context_switch(rq, prev, next); /* unlocks the rq */
} else {
check_siblings(rq);
- rq_unlock_irq(rq);
+ rq_unlock(rq);
+ do_pending_softirq(rq, next);
+ local_irq_enable();
}
}
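
Both MuQSS.c hunks install the same servicing point: one on the context-switch path (finish_lock_switch()) and one on the no-switch path, where __schedule() keeps the current task and previously dropped the lock with rq_unlock_irq(). Splitting that into rq_unlock() plus local_irq_enable() lets do_pending_softirq() run in between, with interrupts still off, exactly as on the switch path. A condensed sketch of the two call sites side by side (taken from the hunks above, not new code):

        /* Context-switch path: finish_lock_switch(), after the rq lock drops */
        rq_unlock(rq);
        do_pending_softirq(rq, current);   /* current may now be the idle task */
        local_irq_enable();

        /* No-switch path: __schedule() else-branch, same ordering */
        rq_unlock(rq);
        do_pending_softirq(rq, next);      /* next == rq->idle triggers servicing */
        local_irq_enable();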
diff --git a/kernel/sched/MuQSS.h b/kernel/sched/MuQSS.h
index f9510d739777..3565a7d8a5ee 100644
--- a/kernel/sched/MuQSS.h
+++ b/kernel/sched/MuQSS.h
@@ -1,5 +1,6 @@
#include <linux/sched.h>
#include <linux/cpuidle.h>
+#include <linux/interrupt.h>
#include <linux/skip_list.h>
#include <linux/stop_machine.h>
#include "cpuacct.h"
@@ -325,4 +326,18 @@ static inline void cpufreq_trigger(u64 time, unsigned long util)
#define arch_scale_freq_invariant() (false)
#endif
+/*
+ * This should only be called when current == rq->idle. Dodgy workaround for
+ * when softirqs are pending and we are in the idle loop. Setting current to
+ * resched will kick us out of the idle loop and the softirqs will be serviced
+ * on our next pass through schedule().
+ */
+static inline bool softirq_pending(int cpu)
+{
+ if (likely(!local_softirq_pending()))
+ return false;
+ set_tsk_need_resched(current);
+ return true;
+}
+
#endif /* MUQSS_SCHED_H */
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 6e4cf75c9b6a..5353ffbc7194 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -206,6 +206,8 @@ exit_idle:
static void cpu_idle_loop(void)
{
while (1) {
+ bool pending = false;
+
/*
* If the arch has a polling bit, we maintain an invariant:
*
@@ -217,7 +219,10 @@ static void cpu_idle_loop(void)
__current_set_polling();
quiet_vmstat();
- tick_nohz_idle_enter();
+ if (unlikely(softirq_pending(cpu)))
+ pending = true;
+ else
+ tick_nohz_idle_enter();
while (!need_resched()) {
check_pgt_cache();
@@ -257,7 +262,8 @@ static void cpu_idle_loop(void)
* not have had an IPI to fold the state for us.
*/
preempt_set_need_resched();
- tick_nohz_idle_exit();
+ if (!pending)
+ tick_nohz_idle_exit();
__current_clr_polling();
/*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 898c0d2f18fe..b7935d2bfb97 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1810,15 +1810,7 @@ static inline void cpufreq_trigger_update(u64 time) {}
#define arch_scale_freq_invariant() (false)
#endif
-static inline void account_reset_rq(struct rq *rq)
+static inline bool softirq_pending(int cpu)
{
-#ifdef CONFIG_IRQ_TIME_ACCOUNTING
- rq->prev_irq_time = 0;
-#endif
-#ifdef CONFIG_PARAVIRT
- rq->prev_steal_time = 0;
-#endif
-#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
- rq->prev_steal_time_rq = 0;
-#endif
+ return false;
}
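
Since kernel/sched/idle.c now calls softirq_pending() unconditionally, the header used by the mainline scheduler build needs a definition too; the sched.h hunk supplies a stub that always reports no pending work (the unrelated account_reset_rq() helper is dropped in the same hunk). Assuming the shared idle loop is compiled against this stub when CONFIG_SCHED_MUQSS is not selected, the new branch should collapse and nohz behaviour stays as before; a sketch of the shared call site from the idle.c hunk:

        if (unlikely(softirq_pending(cpu)))   /* mainline stub: constant false */
                pending = true;               /* effectively dead for that build */
        else
                tick_nohz_idle_enter();       /* behaviour unchanged */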