summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSebastian Andrzej Siewior <bigeasy@linutronix.de>2022-03-11 16:03:42 +0100
committerDavid S. Miller <davem@davemloft.net>2022-03-14 10:09:28 +0000
commitfbd9a2ceba5c74bbfa19cf257ae4b4b2c820860d (patch)
treed0d852fb3a532af0b709b993879ecfe758bd0d71
parentd96657dc9238f8e9bda47b377e17e7c6f90935af (diff)
net: Add lockdep asserts to ____napi_schedule().
____napi_schedule() needs to be invoked with disabled interrupts due to __raise_softirq_irqoff (in order not to corrupt the per-CPU list). ____napi_schedule() also needs to be invoked from an interrupt context so that the raised softirq is processed when the interrupt context is left. Add lockdep asserts for both conditions. Since this is the second time the irq/softirq check is needed, provide a generic lockdep_assert_softirq_will_run() which is used by both callers. Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--include/linux/lockdep.h7
-rw-r--r--net/core/dev.c5
2 files changed, 11 insertions, 1 deletion
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 467b94257105..0cc65d216701 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -329,6 +329,12 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
#define lockdep_assert_none_held_once() \
lockdep_assert_once(!current->lockdep_depth)
+/*
+ * Ensure that softirq is handled within the callchain and not delayed and
+ * handled by chance.
+ */
+#define lockdep_assert_softirq_will_run() \
+ lockdep_assert_once(hardirq_count() | softirq_count())
#define lockdep_recursing(tsk) ((tsk)->lockdep_recursion)
@@ -414,6 +420,7 @@ extern int lockdep_is_held(const void *);
#define lockdep_assert_held_read(l) do { (void)(l); } while (0)
#define lockdep_assert_held_once(l) do { (void)(l); } while (0)
#define lockdep_assert_none_held_once() do { } while (0)
+#define lockdep_assert_softirq_will_run() do { } while (0)
#define lockdep_recursing(tsk) (0)
diff --git a/net/core/dev.c b/net/core/dev.c
index 8d25ec5b3af7..75bab5b0dbae 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4265,6 +4265,9 @@ static inline void ____napi_schedule(struct softnet_data *sd,
{
struct task_struct *thread;
+ lockdep_assert_softirq_will_run();
+ lockdep_assert_irqs_disabled();
+
if (test_bit(NAPI_STATE_THREADED, &napi->state)) {
/* Paired with smp_mb__before_atomic() in
* napi_enable()/dev_set_threaded().
@@ -4872,7 +4875,7 @@ int __netif_rx(struct sk_buff *skb)
{
int ret;
- lockdep_assert_once(hardirq_count() | softirq_count());
+ lockdep_assert_softirq_will_run();
trace_netif_rx_entry(skb);
ret = netif_rx_internal(skb);