author		Paul E. McKenney <paulmck@kernel.org>	2020-11-13 10:08:09 -0800
committer	Kent Overstreet <kent.overstreet@gmail.com>	2020-12-10 18:24:53 -0500
commit		763fee19acc3a40c6a9910066fcd9160179de455 (patch)
tree		728eb4d0490a57f71b98ec8ec4a857e46615a0d0
parent		4b6116096d0b327375bca90b9aa4d67dbfbea5ff (diff)
srcu: Provide internal interface to start a Tree SRCU grace period
There is a need for a polling interface for SRCU grace periods.  This
polling needs to initiate an SRCU grace period without having to queue
(and manage) a callback.  This commit therefore splits the Tree SRCU
__call_srcu() function into callback-initialization and
queuing/start-grace-period portions, with the latter in a new function
named srcu_gp_start_if_needed().  This function may be passed a NULL
callback pointer, in which case it will refrain from queuing anything.

Why have the new function mess with queuing?  Locking considerations,
of course!

Link: https://lore.kernel.org/rcu/20201112201547.GF3365678@moria.home.lan/
Reported-by: Kent Overstreet <kent.overstreet@gmail.com>
Reviewed-by: Neeraj Upadhyay <neeraju@codeaurora.org>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
-rw-r--r--	kernel/rcu/srcutree.c | 62
1 file changed, 35 insertions(+), 27 deletions(-)
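An aside before the patch itself: the log above describes starting a grace
period with a NULL callback pointer.  Below is a hypothetical sketch of the
kind of polling-style caller this enables -- start_my_srcu_poll() is
invented here for illustration and is not part of this patch, and note
that in this patch the enqueue is still unconditional, so passing a NULL
rhp only becomes safe once a later patch in the series guards the call to
rcu_segcblist_enqueue() with an rhp check.

	/*
	 * Hypothetical illustration only -- not part of this patch.
	 * Snapshot the grace-period sequence number as a cookie, then
	 * start a grace period without queuing a callback.
	 */
	static unsigned long start_my_srcu_poll(struct srcu_struct *sp)
	{
		unsigned long cookie;

		check_init_srcu_struct(sp);
		/* The snapshot identifies the grace period to wait for. */
		cookie = rcu_seq_snap(&sp->srcu_gp_seq);
		/* NULL rhp: start the grace period, queue nothing. */
		srcu_gp_start_if_needed(sp, NULL, true);
		/* Callers can later poll completion with rcu_seq_done(). */
		return cookie;
	}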
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c
index 1ff17e297f0c..d4e102fdda49 100644
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
@@ -822,6 +822,40 @@ static void srcu_leak_callback(struct rcu_head *rhp)
}
/*
+ * Start an SRCU grace period, and also queue the callback if non-NULL.
+ */
+static void srcu_gp_start_if_needed(struct srcu_struct *sp, struct rcu_head *rhp, bool do_norm)
+{
+ unsigned long flags;
+ bool needexp = false;
+ bool needgp = false;
+ unsigned long s;
+ struct srcu_data *sdp;
+
+ local_irq_save(flags);
+ sdp = this_cpu_ptr(sp->sda);
+ spin_lock_rcu_node(sdp);
+ rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false);
+ rcu_segcblist_advance(&sdp->srcu_cblist,
+ rcu_seq_current(&sp->srcu_gp_seq));
+ s = rcu_seq_snap(&sp->srcu_gp_seq);
+ (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
+ if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
+ sdp->srcu_gp_seq_needed = s;
+ needgp = true;
+ }
+ if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
+ sdp->srcu_gp_seq_needed_exp = s;
+ needexp = true;
+ }
+ spin_unlock_irqrestore_rcu_node(sdp, flags);
+ if (needgp)
+ srcu_funnel_gp_start(sp, sdp, s, do_norm);
+ else if (needexp)
+ srcu_funnel_exp_start(sp, sdp->mynode, s);
+}
+
+/*
* Enqueue an SRCU callback on the srcu_data structure associated with
* the current CPU and the specified srcu_struct structure, initiating
* grace-period processing if it is not already running.
@@ -852,12 +886,6 @@ static void srcu_leak_callback(struct rcu_head *rhp)
void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
rcu_callback_t func, bool do_norm)
{
- unsigned long flags;
- bool needexp = false;
- bool needgp = false;
- unsigned long s;
- struct srcu_data *sdp;
-
check_init_srcu_struct(sp);
if (debug_rcu_head_queue(rhp)) {
/* Probable double call_srcu(), so leak the callback. */
@@ -866,27 +894,7 @@ void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
return;
}
rhp->func = func;
- local_irq_save(flags);
- sdp = this_cpu_ptr(sp->sda);
- spin_lock_rcu_node(sdp);
- rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false);
- rcu_segcblist_advance(&sdp->srcu_cblist,
- rcu_seq_current(&sp->srcu_gp_seq));
- s = rcu_seq_snap(&sp->srcu_gp_seq);
- (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
- if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
- sdp->srcu_gp_seq_needed = s;
- needgp = true;
- }
- if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
- sdp->srcu_gp_seq_needed_exp = s;
- needexp = true;
- }
- spin_unlock_irqrestore_rcu_node(sdp, flags);
- if (needgp)
- srcu_funnel_gp_start(sp, sdp, s, do_norm);
- else if (needexp)
- srcu_funnel_exp_start(sp, sdp->mynode, s);
+ srcu_gp_start_if_needed(sp, rhp, do_norm);
}
/**
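A closing note on the sequence-number tests in srcu_gp_start_if_needed():
the snapshots are compared with ULONG_CMP_LT() rather than a plain "<" so
that the tests stay correct when the grace-period counter wraps around.
A minimal standalone sketch -- the macro body matches the kernel's
definition in include/linux/rcupdate.h, and the values are purely
illustrative:

	#include <stdio.h>
	#include <limits.h>

	/* Same definition as include/linux/rcupdate.h. */
	#define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))

	int main(void)
	{
		unsigned long seq_needed = ULONG_MAX - 3; /* recorded just before wrap */
		unsigned long snap = 4;                   /* snapshot taken after wrap */

		/*
		 * Numerically seq_needed > snap, but in circular sequence
		 * space snap is newer, so a grace period is still needed.
		 */
		printf("%d\n", ULONG_CMP_LT(seq_needed, snap)); /* prints 1 */
		return 0;
	}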