summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorStephen Rothwell <sfr@canb.auug.org.au>2010-08-16 14:19:55 +1000
committerStephen Rothwell <sfr@canb.auug.org.au>2010-08-16 14:19:55 +1000
commitbbb57abe9d390dd57db719d345cee6b504f8f940 (patch)
treef23cf47a71f3c1bd4731c4ae9777bc82cf313551
parente90e2f3533aa0ee580b38b6541d048d0adab414f (diff)
parentfa7cd37f808c487f722d7a2ce9c0923a323922df (diff)
Merge remote branch 'lost-spurious-irq/lost-spurious-irq'
Conflicts: drivers/ata/sata_fsl.c drivers/ata/sata_mv.c drivers/ata/sata_nv.c include/linux/libata.h
-rw-r--r--arch/arm/mach-aaec2000/core.c2
-rw-r--r--arch/arm/mach-at91/at91rm9200_time.c2
-rw-r--r--arch/arm/mach-at91/at91sam926x_time.c2
-rw-r--r--arch/arm/mach-bcmring/core.c2
-rw-r--r--arch/arm/mach-clps711x/time.c2
-rw-r--r--arch/arm/mach-cns3xxx/core.c2
-rw-r--r--arch/arm/mach-ebsa110/core.c2
-rw-r--r--arch/arm/mach-ep93xx/core.c2
-rw-r--r--arch/arm/mach-footbridge/dc21285-timer.c2
-rw-r--r--arch/arm/mach-footbridge/isa-timer.c2
-rw-r--r--arch/arm/mach-h720x/cpu-h7201.c2
-rw-r--r--arch/arm/mach-h720x/cpu-h7202.c2
-rw-r--r--arch/arm/mach-integrator/integrator_ap.c2
-rw-r--r--arch/arm/mach-ixp2000/core.c2
-rw-r--r--arch/arm/mach-ixp23xx/core.c2
-rw-r--r--arch/arm/mach-ixp4xx/common.c2
-rw-r--r--arch/arm/mach-lh7a40x/time.c2
-rw-r--r--arch/arm/mach-mmp/time.c2
-rw-r--r--arch/arm/mach-netx/time.c2
-rw-r--r--arch/arm/mach-ns9xxx/irq.c3
-rw-r--r--arch/arm/mach-ns9xxx/time-ns9360.c2
-rw-r--r--arch/arm/mach-nuc93x/time.c2
-rw-r--r--arch/arm/mach-omap1/time.c2
-rw-r--r--arch/arm/mach-omap1/timer32k.c2
-rw-r--r--arch/arm/mach-omap2/timer-gp.c2
-rw-r--r--arch/arm/mach-pnx4008/time.c2
-rw-r--r--arch/arm/mach-pxa/time.c2
-rw-r--r--arch/arm/mach-sa1100/time.c2
-rw-r--r--arch/arm/mach-shark/core.c2
-rw-r--r--arch/arm/mach-u300/timer.c2
-rw-r--r--arch/arm/mach-w90x900/time.c2
-rw-r--r--arch/arm/plat-iop/time.c2
-rw-r--r--arch/arm/plat-mxc/time.c2
-rw-r--r--arch/arm/plat-samsung/time.c2
-rw-r--r--arch/arm/plat-versatile/timer-sp.c2
-rw-r--r--arch/blackfin/kernel/time-ts.c6
-rw-r--r--arch/ia64/kernel/time.c2
-rw-r--r--arch/parisc/kernel/irq.c2
-rw-r--r--arch/powerpc/platforms/cell/interrupt.c5
-rw-r--r--arch/x86/kernel/time.c2
-rw-r--r--drivers/ata/libata-core.c54
-rw-r--r--drivers/ata/libata-eh.c4
-rw-r--r--drivers/ata/libata-sff.c37
-rw-r--r--drivers/ata/sata_fsl.c8
-rw-r--r--drivers/ata/sata_mv.c25
-rw-r--r--drivers/ata/sata_nv.c57
-rw-r--r--drivers/clocksource/sh_cmt.c3
-rw-r--r--drivers/clocksource/sh_mtu2.c3
-rw-r--r--drivers/clocksource/sh_tmu.c3
-rw-r--r--drivers/usb/core/hcd.c1
-rw-r--r--include/linux/interrupt.h43
-rw-r--r--include/linux/irq.h40
-rw-r--r--include/linux/libata.h2
-rw-r--r--kernel/irq/chip.c20
-rw-r--r--kernel/irq/handle.c7
-rw-r--r--kernel/irq/internals.h10
-rw-r--r--kernel/irq/manage.c18
-rw-r--r--kernel/irq/proc.c5
-rw-r--r--kernel/irq/spurious.c978
59 files changed, 1062 insertions, 344 deletions
diff --git a/arch/arm/mach-aaec2000/core.c b/arch/arm/mach-aaec2000/core.c
index 3ef68330452a..ac8753539c4f 100644
--- a/arch/arm/mach-aaec2000/core.c
+++ b/arch/arm/mach-aaec2000/core.c
@@ -139,7 +139,7 @@ aaec2000_timer_interrupt(int irq, void *dev_id)
static struct irqaction aaec2000_timer_irq = {
.name = "AAEC-2000 Timer Tick",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_DISABLED | IRQF_TIMER,
.handler = aaec2000_timer_interrupt,
};
diff --git a/arch/arm/mach-at91/at91rm9200_time.c b/arch/arm/mach-at91/at91rm9200_time.c
index 2500f41d8d2d..a4a00493ad9e 100644
--- a/arch/arm/mach-at91/at91rm9200_time.c
+++ b/arch/arm/mach-at91/at91rm9200_time.c
@@ -87,7 +87,7 @@ static irqreturn_t at91rm9200_timer_interrupt(int irq, void *dev_id)
static struct irqaction at91rm9200_timer_irq = {
.name = "at91_tick",
- .flags = IRQF_SHARED | IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_SHARED | IRQF_DISABLED | IRQF_TIMER,
.handler = at91rm9200_timer_interrupt
};
diff --git a/arch/arm/mach-at91/at91sam926x_time.c b/arch/arm/mach-at91/at91sam926x_time.c
index 608a63240b64..22a290bdce24 100644
--- a/arch/arm/mach-at91/at91sam926x_time.c
+++ b/arch/arm/mach-at91/at91sam926x_time.c
@@ -123,7 +123,7 @@ static irqreturn_t at91sam926x_pit_interrupt(int irq, void *dev_id)
static struct irqaction at91sam926x_pit_irq = {
.name = "at91_tick",
- .flags = IRQF_SHARED | IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_SHARED | IRQF_DISABLED | IRQF_TIMER,
.handler = at91sam926x_pit_interrupt
};
diff --git a/arch/arm/mach-bcmring/core.c b/arch/arm/mach-bcmring/core.c
index d3f959e92b2d..f6b132fa80be 100644
--- a/arch/arm/mach-bcmring/core.c
+++ b/arch/arm/mach-bcmring/core.c
@@ -275,7 +275,7 @@ static irqreturn_t bcmring_timer_interrupt(int irq, void *dev_id)
static struct irqaction bcmring_timer_irq = {
.name = "bcmring Timer Tick",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_DISABLED | IRQF_TIMER,
.handler = bcmring_timer_interrupt,
};
diff --git a/arch/arm/mach-clps711x/time.c b/arch/arm/mach-clps711x/time.c
index d581ef0bcd24..9dffaa60c8d1 100644
--- a/arch/arm/mach-clps711x/time.c
+++ b/arch/arm/mach-clps711x/time.c
@@ -56,7 +56,7 @@ p720t_timer_interrupt(int irq, void *dev_id)
static struct irqaction clps711x_timer_irq = {
.name = "CLPS711x Timer Tick",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_DISABLED | IRQF_TIMER,
.handler = p720t_timer_interrupt,
};
diff --git a/arch/arm/mach-cns3xxx/core.c b/arch/arm/mach-cns3xxx/core.c
index 9ca4d581016f..9dcea9f3a84e 100644
--- a/arch/arm/mach-cns3xxx/core.c
+++ b/arch/arm/mach-cns3xxx/core.c
@@ -178,7 +178,7 @@ static irqreturn_t cns3xxx_timer_interrupt(int irq, void *dev_id)
static struct irqaction cns3xxx_timer_irq = {
.name = "timer",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_DISABLED | IRQF_TIMER,
.handler = cns3xxx_timer_interrupt,
};
diff --git a/arch/arm/mach-ebsa110/core.c b/arch/arm/mach-ebsa110/core.c
index c7bc7fbb11a6..efb77343365b 100644
--- a/arch/arm/mach-ebsa110/core.c
+++ b/arch/arm/mach-ebsa110/core.c
@@ -195,7 +195,7 @@ ebsa110_timer_interrupt(int irq, void *dev_id)
static struct irqaction ebsa110_timer_irq = {
.name = "EBSA110 Timer Tick",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_DISABLED | IRQF_TIMER,
.handler = ebsa110_timer_interrupt,
};
diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c
index 4cb55d3902ff..52e0e613dbf2 100644
--- a/arch/arm/mach-ep93xx/core.c
+++ b/arch/arm/mach-ep93xx/core.c
@@ -133,7 +133,7 @@ static irqreturn_t ep93xx_timer_interrupt(int irq, void *dev_id)
static struct irqaction ep93xx_timer_irq = {
.name = "ep93xx timer",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_DISABLED | IRQF_TIMER,
.handler = ep93xx_timer_interrupt,
};
diff --git a/arch/arm/mach-footbridge/dc21285-timer.c b/arch/arm/mach-footbridge/dc21285-timer.c
index bc5e83fb5819..42b0bd787d6f 100644
--- a/arch/arm/mach-footbridge/dc21285-timer.c
+++ b/arch/arm/mach-footbridge/dc21285-timer.c
@@ -41,7 +41,7 @@ timer1_interrupt(int irq, void *dev_id)
static struct irqaction footbridge_timer_irq = {
.name = "Timer1 timer tick",
.handler = timer1_interrupt,
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_DISABLED | IRQF_TIMER,
};
/*
diff --git a/arch/arm/mach-footbridge/isa-timer.c b/arch/arm/mach-footbridge/isa-timer.c
index f488fa2082d7..ca1932e2a2c3 100644
--- a/arch/arm/mach-footbridge/isa-timer.c
+++ b/arch/arm/mach-footbridge/isa-timer.c
@@ -71,7 +71,7 @@ isa_timer_interrupt(int irq, void *dev_id)
static struct irqaction isa_timer_irq = {
.name = "ISA timer tick",
.handler = isa_timer_interrupt,
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_DISABLED | IRQF_TIMER,
};
static void __init isa_timer_init(void)
diff --git a/arch/arm/mach-h720x/cpu-h7201.c b/arch/arm/mach-h720x/cpu-h7201.c
index 24df2a349a98..be1db54c6870 100644
--- a/arch/arm/mach-h720x/cpu-h7201.c
+++ b/arch/arm/mach-h720x/cpu-h7201.c
@@ -37,7 +37,7 @@ h7201_timer_interrupt(int irq, void *dev_id)
static struct irqaction h7201_timer_irq = {
.name = "h7201 Timer Tick",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_DISABLED | IRQF_TIMER,
.handler = h7201_timer_interrupt,
};
diff --git a/arch/arm/mach-h720x/cpu-h7202.c b/arch/arm/mach-h720x/cpu-h7202.c
index fd33a19c813a..e40deea5f5dc 100644
--- a/arch/arm/mach-h720x/cpu-h7202.c
+++ b/arch/arm/mach-h720x/cpu-h7202.c
@@ -166,7 +166,7 @@ static struct irq_chip h7202_timerx_chip = {
static struct irqaction h7202_timer_irq = {
.name = "h7202 Timer Tick",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_DISABLED | IRQF_TIMER,
.handler = h7202_timer_interrupt,
};
diff --git a/arch/arm/mach-integrator/integrator_ap.c b/arch/arm/mach-integrator/integrator_ap.c
index 6ab5a03ab9d8..3459da325406 100644
--- a/arch/arm/mach-integrator/integrator_ap.c
+++ b/arch/arm/mach-integrator/integrator_ap.c
@@ -449,7 +449,7 @@ static struct clock_event_device integrator_clockevent = {
static struct irqaction integrator_timer_irq = {
.name = "timer",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_DISABLED | IRQF_TIMER,
.handler = integrator_timer_interrupt,
.dev_id = &integrator_clockevent,
};
diff --git a/arch/arm/mach-ixp2000/core.c b/arch/arm/mach-ixp2000/core.c
index babb22597163..693275d1139d 100644
--- a/arch/arm/mach-ixp2000/core.c
+++ b/arch/arm/mach-ixp2000/core.c
@@ -213,7 +213,7 @@ static int ixp2000_timer_interrupt(int irq, void *dev_id)
static struct irqaction ixp2000_timer_irq = {
.name = "IXP2000 Timer Tick",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_DISABLED | IRQF_TIMER,
.handler = ixp2000_timer_interrupt,
};
diff --git a/arch/arm/mach-ixp23xx/core.c b/arch/arm/mach-ixp23xx/core.c
index aa4c4420ff3d..8967392b6a66 100644
--- a/arch/arm/mach-ixp23xx/core.c
+++ b/arch/arm/mach-ixp23xx/core.c
@@ -359,7 +359,7 @@ ixp23xx_timer_interrupt(int irq, void *dev_id)
static struct irqaction ixp23xx_timer_irq = {
.name = "IXP23xx Timer Tick",
.handler = ixp23xx_timer_interrupt,
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_DISABLED | IRQF_TIMER,
};
void __init ixp23xx_init_timer(void)
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index 0bce09799d18..9a574fb42f62 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -278,7 +278,7 @@ static irqreturn_t ixp4xx_timer_interrupt(int irq, void *dev_id)
static struct irqaction ixp4xx_timer_irq = {
.name = "timer1",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_DISABLED | IRQF_TIMER,
.handler = ixp4xx_timer_interrupt,
.dev_id = &clockevent_ixp4xx,
};
diff --git a/arch/arm/mach-lh7a40x/time.c b/arch/arm/mach-lh7a40x/time.c
index 4601e425bae3..841fe8c518c5 100644
--- a/arch/arm/mach-lh7a40x/time.c
+++ b/arch/arm/mach-lh7a40x/time.c
@@ -49,7 +49,7 @@ lh7a40x_timer_interrupt(int irq, void *dev_id)
static struct irqaction lh7a40x_timer_irq = {
.name = "LHA740x Timer Tick",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_DISABLED | IRQF_TIMER,
.handler = lh7a40x_timer_interrupt,
};
diff --git a/arch/arm/mach-mmp/time.c b/arch/arm/mach-mmp/time.c
index 66528193f939..1faf6b15c7f0 100644
--- a/arch/arm/mach-mmp/time.c
+++ b/arch/arm/mach-mmp/time.c
@@ -177,7 +177,7 @@ static void __init timer_config(void)
static struct irqaction timer_irq = {
.name = "timer",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_DISABLED | IRQF_TIMER,
.handler = timer_interrupt,
.dev_id = &ckevt,
};
diff --git a/arch/arm/mach-netx/time.c b/arch/arm/mach-netx/time.c
index 82801dbf0579..c0cc83697927 100644
--- a/arch/arm/mach-netx/time.c
+++ b/arch/arm/mach-netx/time.c
@@ -100,7 +100,7 @@ netx_timer_interrupt(int irq, void *dev_id)
static struct irqaction netx_timer_irq = {
.name = "NetX Timer Tick",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_DISABLED | IRQF_TIMER,
.handler = netx_timer_interrupt,
};
diff --git a/arch/arm/mach-ns9xxx/irq.c b/arch/arm/mach-ns9xxx/irq.c
index 038f24d47023..6be86e93b183 100644
--- a/arch/arm/mach-ns9xxx/irq.c
+++ b/arch/arm/mach-ns9xxx/irq.c
@@ -82,9 +82,6 @@ static void handle_prio_irq(unsigned int irq, struct irq_desc *desc)
action_ret = handle_IRQ_event(irq, action);
- /* XXX: There is no direct way to access noirqdebug, so check
- * unconditionally for spurious irqs...
- * Maybe this function should go to kernel/irq/chip.c? */
note_interrupt(irq, desc, action_ret);
raw_spin_lock(&desc->lock);
diff --git a/arch/arm/mach-ns9xxx/time-ns9360.c b/arch/arm/mach-ns9xxx/time-ns9360.c
index 77281260358a..e60627f292d8 100644
--- a/arch/arm/mach-ns9xxx/time-ns9360.c
+++ b/arch/arm/mach-ns9xxx/time-ns9360.c
@@ -121,7 +121,7 @@ static irqreturn_t ns9360_clockevent_handler(int irq, void *dev_id)
static struct irqaction ns9360_clockevent_action = {
.name = "ns9360-timer" __stringify(TIMER_CLOCKEVENT),
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_DISABLED | IRQF_TIMER,
.handler = ns9360_clockevent_handler,
};
diff --git a/arch/arm/mach-nuc93x/time.c b/arch/arm/mach-nuc93x/time.c
index 2f90f9dc6e30..8e0dbea8ec24 100644
--- a/arch/arm/mach-nuc93x/time.c
+++ b/arch/arm/mach-nuc93x/time.c
@@ -56,7 +56,7 @@ static irqreturn_t nuc93x_timer_interrupt(int irq, void *dev_id)
static struct irqaction nuc93x_timer_irq = {
.name = "nuc93x Timer Tick",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_DISABLED | IRQF_TIMER,
.handler = nuc93x_timer_interrupt,
};
diff --git a/arch/arm/mach-omap1/time.c b/arch/arm/mach-omap1/time.c
index 1be6a214d88d..c62fa799fe78 100644
--- a/arch/arm/mach-omap1/time.c
+++ b/arch/arm/mach-omap1/time.c
@@ -157,7 +157,7 @@ static irqreturn_t omap_mpu_timer1_interrupt(int irq, void *dev_id)
static struct irqaction omap_mpu_timer1_irq = {
.name = "mpu_timer1",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_DISABLED | IRQF_TIMER,
.handler = omap_mpu_timer1_interrupt,
};
diff --git a/arch/arm/mach-omap1/timer32k.c b/arch/arm/mach-omap1/timer32k.c
index 20cfbcc6c60c..8ad901b566f4 100644
--- a/arch/arm/mach-omap1/timer32k.c
+++ b/arch/arm/mach-omap1/timer32k.c
@@ -156,7 +156,7 @@ static irqreturn_t omap_32k_timer_interrupt(int irq, void *dev_id)
static struct irqaction omap_32k_timer_irq = {
.name = "32KHz timer",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_DISABLED | IRQF_TIMER,
.handler = omap_32k_timer_interrupt,
};
diff --git a/arch/arm/mach-omap2/timer-gp.c b/arch/arm/mach-omap2/timer-gp.c
index 74fbed8491f2..ddf9fae448b8 100644
--- a/arch/arm/mach-omap2/timer-gp.c
+++ b/arch/arm/mach-omap2/timer-gp.c
@@ -62,7 +62,7 @@ static irqreturn_t omap2_gp_timer_interrupt(int irq, void *dev_id)
static struct irqaction omap2_gp_timer_irq = {
.name = "gp timer",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_DISABLED | IRQF_TIMER,
.handler = omap2_gp_timer_interrupt,
};
diff --git a/arch/arm/mach-pnx4008/time.c b/arch/arm/mach-pnx4008/time.c
index 0c8aad4bb0dc..1d5b2dae3183 100644
--- a/arch/arm/mach-pnx4008/time.c
+++ b/arch/arm/mach-pnx4008/time.c
@@ -80,7 +80,7 @@ static irqreturn_t pnx4008_timer_interrupt(int irq, void *dev_id)
static struct irqaction pnx4008_timer_irq = {
.name = "PNX4008 Tick Timer",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_DISABLED | IRQF_TIMER,
.handler = pnx4008_timer_interrupt
};
diff --git a/arch/arm/mach-pxa/time.c b/arch/arm/mach-pxa/time.c
index 293e40aeaf29..9fa6e1a0b96f 100644
--- a/arch/arm/mach-pxa/time.c
+++ b/arch/arm/mach-pxa/time.c
@@ -133,7 +133,7 @@ static struct clocksource cksrc_pxa_oscr0 = {
static struct irqaction pxa_ost0_irq = {
.name = "ost0",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_DISABLED | IRQF_TIMER,
.handler = pxa_ost0_interrupt,
.dev_id = &ckevt_pxa_osmr0,
};
diff --git a/arch/arm/mach-sa1100/time.c b/arch/arm/mach-sa1100/time.c
index 74b6e0e570b6..7ec781d5249a 100644
--- a/arch/arm/mach-sa1100/time.c
+++ b/arch/arm/mach-sa1100/time.c
@@ -87,7 +87,7 @@ static struct clocksource cksrc_sa1100_oscr = {
static struct irqaction sa1100_timer_irq = {
.name = "ost0",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_DISABLED | IRQF_TIMER,
.handler = sa1100_ost0_interrupt,
.dev_id = &ckevt_sa1100_osmr0,
};
diff --git a/arch/arm/mach-shark/core.c b/arch/arm/mach-shark/core.c
index 358d875ace14..d1d6ea5e9fcf 100644
--- a/arch/arm/mach-shark/core.c
+++ b/arch/arm/mach-shark/core.c
@@ -130,7 +130,7 @@ shark_timer_interrupt(int irq, void *dev_id)
static struct irqaction shark_timer_irq = {
.name = "Shark Timer Tick",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_DISABLED | IRQF_TIMER,
.handler = shark_timer_interrupt,
};
diff --git a/arch/arm/mach-u300/timer.c b/arch/arm/mach-u300/timer.c
index 3fc4472719be..9a7cfbfa7842 100644
--- a/arch/arm/mach-u300/timer.c
+++ b/arch/arm/mach-u300/timer.c
@@ -327,7 +327,7 @@ static irqreturn_t u300_timer_interrupt(int irq, void *dev_id)
static struct irqaction u300_timer_irq = {
.name = "U300 Timer Tick",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_DISABLED | IRQF_TIMER,
.handler = u300_timer_interrupt,
};
diff --git a/arch/arm/mach-w90x900/time.c b/arch/arm/mach-w90x900/time.c
index b80f769bc135..9cda8355e19d 100644
--- a/arch/arm/mach-w90x900/time.c
+++ b/arch/arm/mach-w90x900/time.c
@@ -111,7 +111,7 @@ static irqreturn_t nuc900_timer0_interrupt(int irq, void *dev_id)
static struct irqaction nuc900_timer0_irq = {
.name = "nuc900-timer0",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_DISABLED | IRQF_TIMER,
.handler = nuc900_timer0_interrupt,
};
diff --git a/arch/arm/plat-iop/time.c b/arch/arm/plat-iop/time.c
index 85d3e55ca4a9..f5f1a9d39dbe 100644
--- a/arch/arm/plat-iop/time.c
+++ b/arch/arm/plat-iop/time.c
@@ -127,7 +127,7 @@ iop_timer_interrupt(int irq, void *dev_id)
static struct irqaction iop_timer_irq = {
.name = "IOP Timer Tick",
.handler = iop_timer_interrupt,
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_DISABLED | IRQF_TIMER,
.dev_id = &iop_clockevent,
};
diff --git a/arch/arm/plat-mxc/time.c b/arch/arm/plat-mxc/time.c
index f9a1b059a76c..e5ca2bdaa92d 100644
--- a/arch/arm/plat-mxc/time.c
+++ b/arch/arm/plat-mxc/time.c
@@ -258,7 +258,7 @@ static irqreturn_t mxc_timer_interrupt(int irq, void *dev_id)
static struct irqaction mxc_timer_irq = {
.name = "i.MX Timer Tick",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_DISABLED | IRQF_TIMER,
.handler = mxc_timer_interrupt,
};
diff --git a/arch/arm/plat-samsung/time.c b/arch/arm/plat-samsung/time.c
index 2231d80ad817..133069ae6feb 100644
--- a/arch/arm/plat-samsung/time.c
+++ b/arch/arm/plat-samsung/time.c
@@ -138,7 +138,7 @@ s3c2410_timer_interrupt(int irq, void *dev_id)
static struct irqaction s3c2410_timer_irq = {
.name = "S3C2410 Timer Tick",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_DISABLED | IRQF_TIMER,
.handler = s3c2410_timer_interrupt,
};
diff --git a/arch/arm/plat-versatile/timer-sp.c b/arch/arm/plat-versatile/timer-sp.c
index fb0d1c299718..62066b4b6af7 100644
--- a/arch/arm/plat-versatile/timer-sp.c
+++ b/arch/arm/plat-versatile/timer-sp.c
@@ -135,7 +135,7 @@ static struct clock_event_device sp804_clockevent = {
static struct irqaction sp804_timer_irq = {
.name = "timer",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+ .flags = IRQF_DISABLED | IRQF_TIMER,
.handler = sp804_timer_interrupt,
.dev_id = &sp804_clockevent,
};
diff --git a/arch/blackfin/kernel/time-ts.c b/arch/blackfin/kernel/time-ts.c
index 8c9a43daf80f..6fefad4edd19 100644
--- a/arch/blackfin/kernel/time-ts.c
+++ b/arch/blackfin/kernel/time-ts.c
@@ -213,8 +213,7 @@ irqreturn_t bfin_gptmr0_interrupt(int irq, void *dev_id)
static struct irqaction gptmr0_irq = {
.name = "Blackfin GPTimer0",
- .flags = IRQF_DISABLED | IRQF_TIMER | \
- IRQF_IRQPOLL | IRQF_PERCPU,
+ .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_PERCPU,
.handler = bfin_gptmr0_interrupt,
};
@@ -322,8 +321,7 @@ irqreturn_t bfin_coretmr_interrupt(int irq, void *dev_id)
static struct irqaction coretmr_irq = {
.name = "Blackfin CoreTimer",
- .flags = IRQF_DISABLED | IRQF_TIMER | \
- IRQF_IRQPOLL | IRQF_PERCPU,
+ .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_PERCPU,
.handler = bfin_coretmr_interrupt,
};
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index ed6f22eb5b12..f9dfb57aba56 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -411,7 +411,7 @@ static cycle_t itc_get_cycles(struct clocksource *cs)
static struct irqaction timer_irqaction = {
.handler = timer_interrupt,
- .flags = IRQF_DISABLED | IRQF_IRQPOLL,
+ .flags = IRQF_DISABLED,
.name = "timer"
};
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index efbcee5d2220..24681d553e13 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -383,7 +383,7 @@ void do_cpu_irq_mask(struct pt_regs *regs)
static struct irqaction timer_action = {
.handler = timer_interrupt,
.name = "timer",
- .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_PERCPU | IRQF_IRQPOLL,
+ .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_PERCPU,
};
#ifdef CONFIG_SMP
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 10eb1a443626..e638fb3e9893 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -233,8 +233,6 @@ static int iic_host_match(struct irq_host *h, struct device_node *node)
"IBM,CBEA-Internal-Interrupt-Controller");
}
-extern int noirqdebug;
-
static void handle_iic_irq(unsigned int irq, struct irq_desc *desc)
{
raw_spin_lock(&desc->lock);
@@ -267,8 +265,7 @@ static void handle_iic_irq(unsigned int irq, struct irq_desc *desc)
desc->status &= ~IRQ_PENDING;
raw_spin_unlock(&desc->lock);
action_ret = handle_IRQ_event(irq, action);
- if (!noirqdebug)
- note_interrupt(irq, desc, action_ret);
+ note_interrupt(irq, desc, action_ret);
raw_spin_lock(&desc->lock);
} while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);
diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
index fb5cc5e14cfa..476e2fe402b5 100644
--- a/arch/x86/kernel/time.c
+++ b/arch/x86/kernel/time.c
@@ -88,7 +88,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
static struct irqaction irq0 = {
.handler = timer_interrupt,
- .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL | IRQF_TIMER,
+ .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TIMER,
.name = "timer"
};
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 7ef7c4f216fa..21a883848279 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4939,17 +4939,7 @@ static void ata_verify_xfer(struct ata_queued_cmd *qc)
dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
}
-/**
- * ata_qc_complete - Complete an active ATA command
- * @qc: Command to complete
- *
- * Indicate to the mid and upper layers that an ATA
- * command has completed, with either an ok or not-ok status.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- */
-void ata_qc_complete(struct ata_queued_cmd *qc)
+static void ata_qc_complete_raw(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
@@ -5028,6 +5018,27 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
}
/**
+ * ata_qc_complete - Complete an active ATA command
+ * @qc: Command to complete
+ *
+ * Indicate to the mid and upper layers that an ATA command has
+ * completed, with either an ok or not-ok status.
+ *
+ * Refrain from calling this function multiple times when
+ * successfully completing multiple NCQ commands.
+ * ata_qc_complete_multiple() should be used instead, which will
+ * properly update IRQ expect state.
+ *
+ * LOCKING:
+ * spin_lock_irqsave(host lock)
+ */
+void ata_qc_complete(struct ata_queued_cmd *qc)
+{
+ unexpect_irq(qc->ap->irq_expect, false);
+ ata_qc_complete_raw(qc);
+}
+
+/**
* ata_qc_complete_multiple - Complete multiple qcs successfully
* @ap: port in question
* @qc_active: new qc_active mask
@@ -5037,6 +5048,10 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
* requests normally. ap->qc_active and @qc_active is compared
* and commands are completed accordingly.
*
+ * Always use this function when completing multiple NCQ commands
+ * from IRQ handlers instead of calling ata_qc_complete()
+ * multiple times to keep IRQ expect status properly in sync.
+ *
* LOCKING:
* spin_lock_irqsave(host lock)
*
@@ -5048,6 +5063,8 @@ int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
int nr_done = 0;
u32 done_mask;
+ unexpect_irq(ap->irq_expect, false);
+
done_mask = ap->qc_active ^ qc_active;
if (unlikely(done_mask & qc_active)) {
@@ -5062,12 +5079,15 @@ int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
qc = ata_qc_from_tag(ap, tag);
if (qc) {
- ata_qc_complete(qc);
+ ata_qc_complete_raw(qc);
nr_done++;
}
done_mask &= ~(1 << tag);
}
+ if (ap->qc_active)
+ expect_irq(ap->irq_expect);
+
return nr_done;
}
@@ -5134,6 +5154,7 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
qc->err_mask |= ap->ops->qc_issue(qc);
if (unlikely(qc->err_mask))
goto err;
+ expect_irq(ap->irq_expect);
return;
sg_err:
@@ -6167,8 +6188,13 @@ int ata_host_activate(struct ata_host *host, int irq,
if (rc)
return rc;
- for (i = 0; i < host->n_ports; i++)
- ata_port_desc(host->ports[i], "irq %d", irq);
+ for (i = 0; i < host->n_ports; i++) {
+ struct ata_port *ap = host->ports[i];
+
+ if (!ata_port_is_dummy(ap))
+ ap->irq_expect = init_irq_expect(irq, host);
+ ata_port_desc(ap, "irq %d%s", irq, ap->irq_expect ? "+" : "");
+ }
rc = ata_host_register(host, sht);
/* if failed, just free the IRQ and leave ports alone */
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index c9ae299b8342..287e5b78482e 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -619,8 +619,10 @@ void ata_scsi_error(struct Scsi_Host *host)
* handler doesn't diddle with those qcs. This must
* be done atomically w.r.t. setting QCFLAG_FAILED.
*/
- if (nr_timedout)
+ if (nr_timedout) {
+ unexpect_irq(ap->irq_expect, true);
__ata_port_freeze(ap);
+ }
spin_unlock_irqrestore(ap->lock, flags);
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 674c1436491f..57ea9c78af43 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -2388,7 +2388,8 @@ int ata_pci_sff_activate_host(struct ata_host *host,
struct device *dev = host->dev;
struct pci_dev *pdev = to_pci_dev(dev);
const char *drv_name = dev_driver_string(host->dev);
- int legacy_mode = 0, rc;
+ struct ata_port *ap[2] = { host->ports[0], host->ports[1] };
+ int legacy_mode = 0, i, rc;
rc = ata_host_start(host);
if (rc)
@@ -2422,29 +2423,29 @@ int ata_pci_sff_activate_host(struct ata_host *host,
if (rc)
goto out;
- ata_port_desc(host->ports[0], "irq %d", pdev->irq);
- ata_port_desc(host->ports[1], "irq %d", pdev->irq);
+ for (i = 0; i < 2; i++) {
+ if (!ata_port_is_dummy(ap[i]))
+ ap[i]->irq_expect =
+ init_irq_expect(pdev->irq, host);
+ ata_port_desc(ap[i], "irq %d%s",
+ pdev->irq, ap[i]->irq_expect ? "+" : "");
+ }
} else if (legacy_mode) {
- if (!ata_port_is_dummy(host->ports[0])) {
- rc = devm_request_irq(dev, ATA_PRIMARY_IRQ(pdev),
- irq_handler, IRQF_SHARED,
- drv_name, host);
- if (rc)
- goto out;
+ unsigned int irqs[2] = { ATA_PRIMARY_IRQ(pdev),
+ ATA_SECONDARY_IRQ(pdev) };
- ata_port_desc(host->ports[0], "irq %d",
- ATA_PRIMARY_IRQ(pdev));
- }
+ for (i = 0; i < 2; i++) {
+ if (ata_port_is_dummy(ap[i]))
+ continue;
- if (!ata_port_is_dummy(host->ports[1])) {
- rc = devm_request_irq(dev, ATA_SECONDARY_IRQ(pdev),
- irq_handler, IRQF_SHARED,
- drv_name, host);
+ rc = devm_request_irq(dev, irqs[i], irq_handler,
+ IRQF_SHARED, drv_name, host);
if (rc)
goto out;
- ata_port_desc(host->ports[1], "irq %d",
- ATA_SECONDARY_IRQ(pdev));
+ ap[i]->irq_expect = init_irq_expect(irqs[i], host);
+ ata_port_desc(ap[i], "irq %d%s",
+ irqs[i], ap[i]->irq_expect ? "+" : "");
}
}
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 7325f77480dc..1440dc0af242 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -1137,17 +1137,13 @@ static void sata_fsl_host_intr(struct ata_port *ap)
ioread32(hcr_base + CE));
for (i = 0; i < SATA_FSL_QUEUE_DEPTH; i++) {
- if (done_mask & (1 << i)) {
- qc = ata_qc_from_tag(ap, i);
- if (qc) {
- ata_qc_complete(qc);
- }
+ if (done_mask & (1 << i))
DPRINTK
("completing ncq cmd,tag=%d,CC=0x%x,CA=0x%x\n",
i, ioread32(hcr_base + CC),
ioread32(hcr_base + CA));
- }
}
+ ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
return;
} else if ((ap->qc_active & (1 << ATA_TAG_INTERNAL))) {
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 9463c71dd38e..f6b3b47f1f8a 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -2713,18 +2713,11 @@ static void mv_err_intr(struct ata_port *ap)
}
}
-static void mv_process_crpb_response(struct ata_port *ap,
+static bool mv_process_crpb_response(struct ata_port *ap,
struct mv_crpb *response, unsigned int tag, int ncq_enabled)
{
u8 ata_status;
u16 edma_status = le16_to_cpu(response->flags);
- struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
-
- if (unlikely(!qc)) {
- ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n",
- __func__, tag);
- return;
- }
/*
* edma_status from a response queue entry:
@@ -2738,13 +2731,14 @@ static void mv_process_crpb_response(struct ata_port *ap,
* Error will be seen/handled by
* mv_err_intr(). So do nothing at all here.
*/
- return;
+ return false;
}
}
ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
if (!ac_err_mask(ata_status))
- ata_qc_complete(qc);
+ return true;
/* else: leave it for mv_err_intr() */
+ return false;
}
static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
@@ -2753,6 +2747,7 @@ static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp
struct mv_host_priv *hpriv = ap->host->private_data;
u32 in_index;
bool work_done = false;
+ u32 done_mask = 0;
int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);
/* Get the hardware queue position index */
@@ -2773,15 +2768,19 @@ static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp
/* Gen II/IIE: get command tag from CRPB entry */
tag = le16_to_cpu(response->id) & 0x1f;
}
- mv_process_crpb_response(ap, response, tag, ncq_enabled);
+ if (mv_process_crpb_response(ap, response, tag, ncq_enabled))
+ done_mask |= 1 << tag;
work_done = true;
}
- /* Update the software queue position index in hardware */
- if (work_done)
+ if (work_done) {
+ ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
+
+ /* Update the software queue position index in hardware */
writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
(pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
port_mmio + EDMA_RSP_Q_OUT_PTR);
+ }
}
static void mv_port_intr(struct ata_port *ap, u32 port_cause)
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index cb89ef8d99d9..7254e255fd78 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -873,29 +873,11 @@ static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
ata_port_freeze(ap);
else
ata_port_abort(ap);
- return 1;
+ return -1;
}
- if (likely(flags & NV_CPB_RESP_DONE)) {
- struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
- VPRINTK("CPB flags done, flags=0x%x\n", flags);
- if (likely(qc)) {
- DPRINTK("Completing qc from tag %d\n", cpb_num);
- ata_qc_complete(qc);
- } else {
- struct ata_eh_info *ehi = &ap->link.eh_info;
- /* Notifier bits set without a command may indicate the drive
- is misbehaving. Raise host state machine violation on this
- condition. */
- ata_port_printk(ap, KERN_ERR,
- "notifier for tag %d with no cmd?\n",
- cpb_num);
- ehi->err_mask |= AC_ERR_HSM;
- ehi->action |= ATA_EH_RESET;
- ata_port_freeze(ap);
- return 1;
- }
- }
+ if (likely(flags & NV_CPB_RESP_DONE))
+ return 1;
return 0;
}
@@ -1018,6 +1000,7 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
NV_ADMA_STAT_CPBERR |
NV_ADMA_STAT_CMD_COMPLETE)) {
u32 check_commands = notifier_clears[i];
+ u32 done_mask = 0;
int pos, rc;
if (status & NV_ADMA_STAT_CPBERR) {
@@ -1034,10 +1017,13 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
pos--;
rc = nv_adma_check_cpb(ap, pos,
notifier_error & (1 << pos));
- if (unlikely(rc))
+ if (rc > 0)
+ done_mask |= 1 << pos;
+ else if (unlikely(rc < 0))
check_commands = 0;
check_commands &= ~(1 << pos);
}
+ ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
}
}
@@ -2132,7 +2118,6 @@ static int nv_swncq_sdbfis(struct ata_port *ap)
struct ata_eh_info *ehi = &ap->link.eh_info;
u32 sactive;
u32 done_mask;
- int i;
u8 host_stat;
u8 lack_dhfis = 0;
@@ -2152,27 +2137,11 @@ static int nv_swncq_sdbfis(struct ata_port *ap)
sactive = readl(pp->sactive_block);
done_mask = pp->qc_active ^ sactive;
- if (unlikely(done_mask & sactive)) {
- ata_ehi_clear_desc(ehi);
- ata_ehi_push_desc(ehi, "illegal SWNCQ:qc_active transition"
- "(%08x->%08x)", pp->qc_active, sactive);
- ehi->err_mask |= AC_ERR_HSM;
- ehi->action |= ATA_EH_RESET;
- return -EINVAL;
- }
- for (i = 0; i < ATA_MAX_QUEUE; i++) {
- if (!(done_mask & (1 << i)))
- continue;
-
- qc = ata_qc_from_tag(ap, i);
- if (qc) {
- ata_qc_complete(qc);
- pp->qc_active &= ~(1 << i);
- pp->dhfis_bits &= ~(1 << i);
- pp->dmafis_bits &= ~(1 << i);
- pp->sdbfis_bits |= (1 << i);
- }
- }
+ pp->qc_active &= ~done_mask;
+ pp->dhfis_bits &= ~done_mask;
+ pp->dmafis_bits &= ~done_mask;
+ pp->sdbfis_bits |= done_mask;
+ ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
if (!ap->qc_active) {
DPRINTK("over\n");
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index a44611652282..0a9701b1e13f 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -610,8 +610,7 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
p->irqaction.name = dev_name(&p->pdev->dev);
p->irqaction.handler = sh_cmt_interrupt;
p->irqaction.dev_id = p;
- p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \
- IRQF_IRQPOLL | IRQF_NOBALANCING;
+ p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_NOBALANCING;
/* get hold of clock */
p->clk = clk_get(&p->pdev->dev, "cmt_fck");
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index ef7a5be8a09f..9fe3507a453d 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -281,8 +281,7 @@ static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev)
p->irqaction.handler = sh_mtu2_interrupt;
p->irqaction.dev_id = p;
p->irqaction.irq = irq;
- p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \
- IRQF_IRQPOLL | IRQF_NOBALANCING;
+ p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_NOBALANCING;
/* get hold of clock */
p->clk = clk_get(&p->pdev->dev, "mtu2_fck");
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index de715901b82a..75967f8e028e 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -387,8 +387,7 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
p->irqaction.handler = sh_tmu_interrupt;
p->irqaction.dev_id = p;
p->irqaction.irq = irq;
- p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \
- IRQF_IRQPOLL | IRQF_NOBALANCING;
+ p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_NOBALANCING;
/* get hold of clock */
p->clk = clk_get(&p->pdev->dev, "tmu_fck");
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 5cca00a6d09d..2d6e53a180fd 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2278,6 +2278,7 @@ int usb_add_hcd(struct usb_hcd *hcd,
"request interrupt %d failed\n", irqnum);
goto err_request_irq;
}
+ watch_irq(irqnum, hcd);
hcd->irq = irqnum;
dev_info(hcd->self.controller, "irq %d, %s 0x%08llx\n", irqnum,
(hcd->driver->flags & HCD_MEMORY) ?
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index a0384a4d1e6f..76a1192c14be 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -47,9 +47,6 @@
* IRQF_TIMER - Flag to mark this interrupt as timer interrupt
* IRQF_PERCPU - Interrupt is per cpu
* IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
- * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
- * registered first in an shared interrupt is considered for
- * performance reasons)
* IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
* Used by threaded interrupts which need to keep the
* irq line disabled until the threaded handler has been run.
@@ -63,7 +60,6 @@
#define __IRQF_TIMER 0x00000200
#define IRQF_PERCPU 0x00000400
#define IRQF_NOBALANCING 0x00000800
-#define IRQF_IRQPOLL 0x00001000
#define IRQF_ONESHOT 0x00002000
#define IRQF_NO_SUSPEND 0x00004000
@@ -97,6 +93,16 @@ enum {
typedef irqreturn_t (*irq_handler_t)(int, void *);
+struct irq_expect;
+
+struct irq_watch {
+ irqreturn_t last_ret;
+ unsigned int flags;
+ unsigned long started;
+ unsigned int nr_samples;
+ unsigned int nr_polled;
+};
+
/**
* struct irqaction - per interrupt action descriptor
* @handler: interrupt handler function
@@ -109,18 +115,22 @@ typedef irqreturn_t (*irq_handler_t)(int, void *);
* @thread_fn: interupt handler function for threaded interrupts
* @thread: thread pointer for threaded interrupts
* @thread_flags: flags related to @thread
+ * @watch: data for irq watching
+ * @expects: data for irq expecting
*/
struct irqaction {
- irq_handler_t handler;
- unsigned long flags;
- const char *name;
- void *dev_id;
- struct irqaction *next;
- int irq;
- struct proc_dir_entry *dir;
- irq_handler_t thread_fn;
- struct task_struct *thread;
- unsigned long thread_flags;
+ irq_handler_t handler;
+ unsigned long flags;
+ const char *name;
+ void *dev_id;
+ struct irqaction *next;
+ int irq;
+ struct proc_dir_entry *dir;
+ irq_handler_t thread_fn;
+ struct task_struct *thread;
+ unsigned long thread_flags;
+ struct irq_watch watch;
+ struct irq_expect *expects;
};
extern irqreturn_t no_action(int cpl, void *dev_id);
@@ -193,6 +203,11 @@ devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
+extern struct irq_expect *init_irq_expect(unsigned int irq, void *dev_id);
+extern void expect_irq(struct irq_expect *exp);
+extern void unexpect_irq(struct irq_expect *exp, bool timedout);
+extern void watch_irq(unsigned int irq, void *dev_id);
+
/*
* On lockdep we dont want to enable hardirqs in hardirq
* context. Use local_irq_enable_in_hardirq() to annotate
diff --git a/include/linux/irq.h b/include/linux/irq.h
index fff1d77c3753..4cb22259edd9 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -21,6 +21,7 @@
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/errno.h>
+#include <linux/timer.h>
#include <linux/topology.h>
#include <linux/wait.h>
@@ -71,6 +72,8 @@ typedef void (*irq_flow_handler_t)(unsigned int irq,
#define IRQ_SUSPENDED 0x04000000 /* IRQ has gone through suspend sequence */
#define IRQ_ONESHOT 0x08000000 /* IRQ is not unmasked after hardirq */
#define IRQ_NESTED_THREAD 0x10000000 /* IRQ is nested into another, no own handler thread */
+#define IRQ_IN_POLLING 0x20000000 /* IRQ polling in progress */
+#define IRQ_CHECK_WATCHES 0x40000000 /* IRQ watch enabled */
#ifdef CONFIG_IRQ_PER_CPU
# define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU)
@@ -144,6 +147,17 @@ struct irq_chip {
struct timer_rand_state;
struct irq_2_iommu;
+
+/* spurious IRQ tracking and handling */
+struct irq_spr {
+ unsigned long last_bad; /* when was the last bad? */
+ unsigned long period_start; /* period start jiffies */
+ unsigned int nr_samples; /* nr of irqs in this period */
+ unsigned int nr_bad; /* nr of bad deliveries */
+ unsigned int poll_cnt; /* nr to poll once activated */
+ unsigned int poll_rem; /* how many polls are left? */
+};
+
/**
* struct irq_desc - interrupt descriptor
* @irq: interrupt number for this descriptor
@@ -160,15 +174,14 @@ struct irq_2_iommu;
* @status: status information
* @depth: disable-depth, for nested irq_disable() calls
* @wake_depth: enable depth, for multiple set_irq_wake() callers
- * @irq_count: stats field to detect stalled irqs
- * @last_unhandled: aging timer for unhandled count
- * @irqs_unhandled: stats field for spurious unhandled interrupts
* @lock: locking for SMP
* @affinity: IRQ affinity on SMP
* @node: node index useful for balancing
* @pending_mask: pending rebalanced interrupts
* @threads_active: number of irqaction threads currently running
* @wait_for_threads: wait queue for sync_irq to wait for threaded handlers
+ * @spr: data for spurious IRQ handling
+ * @poll_timer: timer for IRQ polling
* @dir: /proc/irq/ procfs entry
* @name: flow handler name for /proc/interrupts output
*/
@@ -189,9 +202,6 @@ struct irq_desc {
unsigned int depth; /* nested irq disables */
unsigned int wake_depth; /* nested wake enables */
- unsigned int irq_count; /* For detecting broken IRQs */
- unsigned long last_unhandled; /* Aging timer for unhandled count */
- unsigned int irqs_unhandled;
raw_spinlock_t lock;
#ifdef CONFIG_SMP
cpumask_var_t affinity;
@@ -203,6 +213,11 @@ struct irq_desc {
#endif
atomic_t threads_active;
wait_queue_head_t wait_for_threads;
+
+ struct irq_spr spr;
+ struct timer_list poll_timer;
+ bool poll_warned;
+
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *dir;
#endif
@@ -324,8 +339,17 @@ static inline void generic_handle_irq(unsigned int irq)
}
/* Handling of unhandled and spurious interrupts: */
-extern void note_interrupt(unsigned int irq, struct irq_desc *desc,
- irqreturn_t action_ret);
+extern void __note_interrupt(unsigned int irq, struct irq_desc *desc,
+ irqreturn_t action_ret);
+
+static inline void note_interrupt(unsigned int irq, struct irq_desc *desc,
+ irqreturn_t action_ret)
+{
+ extern int noirqdebug;
+
+ if (!noirqdebug)
+ __note_interrupt(irq, desc, action_ret);
+}
/* Resending of interrupts :*/
void check_irq_resend(struct irq_desc *desc, unsigned int irq);
diff --git a/include/linux/libata.h b/include/linux/libata.h
index f010f18a0f86..dcdd1373807a 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -751,6 +751,8 @@ struct ata_port {
struct ata_host *host;
struct device *dev;
+ struct irq_expect *irq_expect; /* for irq expecting */
+
struct mutex scsi_scan_mutex;
struct delayed_work hotplug_task;
struct work_struct scsi_rescan_task;
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index b7091d5ca2f8..45a87f57ade7 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -40,8 +40,6 @@ static void dynamic_irq_init_x(unsigned int irq, bool keep_chip_data)
if (!keep_chip_data)
desc->chip_data = NULL;
desc->action = NULL;
- desc->irq_count = 0;
- desc->irqs_unhandled = 0;
#ifdef CONFIG_SMP
cpumask_setall(desc->affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
@@ -406,8 +404,7 @@ void handle_nested_irq(unsigned int irq)
raw_spin_unlock_irq(&desc->lock);
action_ret = action->thread_fn(action->irq, action->dev_id);
- if (!noirqdebug)
- note_interrupt(irq, desc, action_ret);
+ note_interrupt(irq, desc, action_ret);
raw_spin_lock_irq(&desc->lock);
desc->status &= ~IRQ_INPROGRESS;
@@ -450,8 +447,7 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
raw_spin_unlock(&desc->lock);
action_ret = handle_IRQ_event(irq, action);
- if (!noirqdebug)
- note_interrupt(irq, desc, action_ret);
+ note_interrupt(irq, desc, action_ret);
raw_spin_lock(&desc->lock);
desc->status &= ~IRQ_INPROGRESS;
@@ -495,8 +491,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
raw_spin_unlock(&desc->lock);
action_ret = handle_IRQ_event(irq, action);
- if (!noirqdebug)
- note_interrupt(irq, desc, action_ret);
+ note_interrupt(irq, desc, action_ret);
raw_spin_lock(&desc->lock);
desc->status &= ~IRQ_INPROGRESS;
@@ -548,8 +543,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
raw_spin_unlock(&desc->lock);
action_ret = handle_IRQ_event(irq, action);
- if (!noirqdebug)
- note_interrupt(irq, desc, action_ret);
+ note_interrupt(irq, desc, action_ret);
raw_spin_lock(&desc->lock);
desc->status &= ~IRQ_INPROGRESS;
@@ -625,8 +619,7 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
desc->status &= ~IRQ_PENDING;
raw_spin_unlock(&desc->lock);
action_ret = handle_IRQ_event(irq, action);
- if (!noirqdebug)
- note_interrupt(irq, desc, action_ret);
+ note_interrupt(irq, desc, action_ret);
raw_spin_lock(&desc->lock);
} while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);
@@ -654,8 +647,7 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
desc->chip->ack(irq);
action_ret = handle_IRQ_event(irq, desc->action);
- if (!noirqdebug)
- note_interrupt(irq, desc, action_ret);
+ note_interrupt(irq, desc, action_ret);
if (desc->chip->eoi)
desc->chip->eoi(irq);
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 27e5c6911223..685c3b3cf465 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -416,6 +416,7 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
}
retval |= ret;
+ action->watch.last_ret = ret;
action = action->next;
} while (action);
@@ -461,8 +462,7 @@ unsigned int __do_IRQ(unsigned int irq)
desc->chip->ack(irq);
if (likely(!(desc->status & IRQ_DISABLED))) {
action_ret = handle_IRQ_event(irq, desc->action);
- if (!noirqdebug)
- note_interrupt(irq, desc, action_ret);
+ note_interrupt(irq, desc, action_ret);
}
desc->chip->end(irq);
return 1;
@@ -515,8 +515,7 @@ unsigned int __do_IRQ(unsigned int irq)
raw_spin_unlock(&desc->lock);
action_ret = handle_IRQ_event(irq, action);
- if (!noirqdebug)
- note_interrupt(irq, desc, action_ret);
+ note_interrupt(irq, desc, action_ret);
raw_spin_lock(&desc->lock);
if (likely(!(desc->status & IRQ_PENDING)))
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index c63f3bc88f0b..1b24309a0404 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -2,8 +2,6 @@
* IRQ subsystem internal functions and variables:
*/
-extern int noirqdebug;
-
/* Set default functions for irq_chip structures: */
extern void irq_chip_set_defaults(struct irq_chip *chip);
@@ -40,6 +38,12 @@ extern int irq_select_affinity_usr(unsigned int irq);
extern void irq_set_thread_affinity(struct irq_desc *desc);
+extern void poll_irq(unsigned long arg);
+extern void irq_poll_action_added(struct irq_desc *desc,
+ struct irqaction *action);
+extern void irq_poll_action_removed(struct irq_desc *desc,
+ struct irqaction *action);
+
/* Inline functions for support of irq chips on slow busses */
static inline void chip_bus_lock(unsigned int irq, struct irq_desc *desc)
{
@@ -64,7 +68,7 @@ static inline void chip_bus_sync_unlock(unsigned int irq, struct irq_desc *desc)
static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
{
printk("irq %d, desc: %p, depth: %d, count: %d, unhandled: %d\n",
- irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled);
+ irq, desc, desc->depth, desc->spr.nr_samples, desc->spr.nr_bad);
printk("->handle_irq(): %p, ", desc->handle_irq);
print_symbol("%s\n", (unsigned long)desc->handle_irq);
printk("->chip(): %p, ", desc->chip);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index c3003e9d91a3..8533e32d91d2 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -755,6 +755,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
irq_chip_set_defaults(desc->chip);
init_waitqueue_head(&desc->wait_for_threads);
+ setup_timer(&desc->poll_timer, poll_irq, (unsigned long)desc);
/* Setup the type (level, edge polarity) if configured: */
if (new->flags & IRQF_TRIGGER_MASK) {
@@ -803,21 +804,10 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
new->irq = irq;
*old_ptr = new;
- /* Reset broken irq detection when installing new handler */
- desc->irq_count = 0;
- desc->irqs_unhandled = 0;
-
- /*
- * Check whether we disabled the irq via the spurious handler
- * before. Reenable it and give it another chance.
- */
- if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) {
- desc->status &= ~IRQ_SPURIOUS_DISABLED;
- __enable_irq(desc, irq, false);
- }
-
raw_spin_unlock_irqrestore(&desc->lock, flags);
+ irq_poll_action_added(desc, new);
+
/*
* Strictly no need to wake it up, but hung_task complains
* when no hard interrupt wakes the thread up.
@@ -933,6 +923,8 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
raw_spin_unlock_irqrestore(&desc->lock, flags);
+ irq_poll_action_removed(desc, action);
+
unregister_handler_proc(irq, action);
/* Make sure it's not being used on another CPU: */
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 09a2ee540bd2..b072460a602f 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -205,10 +205,11 @@ static const struct file_operations irq_node_proc_fops = {
static int irq_spurious_proc_show(struct seq_file *m, void *v)
{
struct irq_desc *desc = irq_to_desc((long) m->private);
+ struct irq_spr *spr = &desc->spr;
seq_printf(m, "count %u\n" "unhandled %u\n" "last_unhandled %u ms\n",
- desc->irq_count, desc->irqs_unhandled,
- jiffies_to_msecs(desc->last_unhandled));
+ spr->nr_samples, spr->nr_bad,
+ jiffies_to_msecs(spr->last_bad));
return 0;
}
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 89fb90ae534f..b34b023dab5b 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -2,33 +2,637 @@
* linux/kernel/irq/spurious.c
*
* Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
+ * Copyright (C) 2010 SUSE Linux Products GmbH
+ * Copyright (C) 2010 Tejun Heo <tj@kernel.org>
*
- * This file contains spurious interrupt handling.
+ * There are two ways interrupt handling can go wrong - too few or too
+ * many. Due to misrouting or other issues, sometimes IRQs don't
+ * reach the driver while at other times an interrupt line gets stuck
+ * and continuous spurious interrupts are generated.
+ *
+ * This file implements workaround for both cases. Lost interrupts
+ * are handled by IRQ expecting and watching, and spurious interrupts
+ * by spurious polling. All mechanisms need IRQF_SHARED to be set on
+ * the irqaction in question.
+ *
+ * Both lost interrupt workarounds require cooperation from drivers
+ * and can be chosen depending on how much information the driver can
+ * provide.
+ *
+ * - IRQ expecting
+ *
+ * IRQ expecting is useful when the driver can tell when IRQs can be
+ * expected; in other words, when IRQs are used to signal completion
+ * of host initiated operations. This is the surest way to work
+ * around lost interrupts.
+ *
+ * When the controller is expected to raise an IRQ, the driver
+ * should call expect_irq() and, when the expected event happens or
+ * times out, unexpect_irq(). The IRQ subsystem polls the interrupt
+ * in between.
+ *
+ * As interrupts tend to keep working if it works at the beginning,
+ * IRQ expecting implements "verified state". After certain number
+ * of successful IRQ deliveries, the irqaction becomes verified and
+ * much longer polling interval is used.
+ *
+ * - IRQ watching
+ *
+ * This can be used when the driver doesn't know when to exactly
+ * expect and unexpect IRQs. Once watch_irq() is called, the
+ * irqaction is slowly polled for certain amount of time (1min). If
+ * IRQs are missed during that time, the irqaction is marked and
+ * actively polled; otherwise, the watching is stopped.
+ *
+ * In the most basic case, drivers can call this right after
+ * registering an irqaction to verify IRQ delivery. In many cases,
+ * if IRQ works at the beginning, it keeps working, so just calling
+ * watch_irq() once can provide decent protection against misrouted
+ * IRQs. It would also be a good idea to call watch_irq() when
+ * timeouts are detected.
+ *
+ * - Spurious IRQ handling
+ *
+ * All IRQs are continuously monitored and spurious IRQ handling
+ * kicks in if there are too many spurious IRQs. The IRQ is
+ * disabled and the registered irqactions are polled. The IRQ is
+ * given another shot after a certain number of IRQs are handled or an
+ * irqaction is added or removed.
+ *
+ * All of the above three mechanisms can be used together. Spurious
+ * IRQ handling is enabled by default and drivers are free to expect
+ * and watch IRQs as they see fit.
*/
#include <linux/jiffies.h>
#include <linux/irq.h>
+#include <linux/log2.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
-#include <linux/timer.h>
+#include <linux/slab.h>
+
+#include "internals.h"
+
+/*
+ * I spent quite some time thinking about each parameter but they
+ * still are just numbers pulled out of my ass. If you think your ass
+ * is prettier than mine, please go ahead and suggest better ones.
+ *
+ * Most parameters are intentionally fixed constants and not
+ * adjustable through API. The nature of IRQ delivery failures isn't
+ * usually dependent on specific drivers and the timing parameters are
+ * more about human perceivable latencies rather than any specific
+ * controller timing details, so figuring out constant values which
+ * can work for most cases shouldn't be too hard. This allows tighter
+ * control over polling behaviors, eases future changes and makes the
+ * interface easy for drivers.
+ */
+enum {
+ /* irqfixup levels */
+ IRQFIXUP_SPURIOUS = 0, /* spurious storm detection */
+ IRQFIXUP_MISROUTED = 1, /* misrouted IRQ fixup */
+ IRQFIXUP_POLL = 2, /* enable polling by default */
+
+ /* IRQ polling common parameters */
+ IRQ_POLL_SLOW_INTV = 3 * HZ, /* not too slow for ppl, slow enough for machine */
+ IRQ_POLL_INTV = HZ / 100, /* from the good ol' 100HZ tick */
+ IRQ_POLL_QUICK_INTV = HZ / 1000, /* on every tick, basically */
+
+ IRQ_POLL_SLOW_SLACK = HZ,
+ IRQ_POLL_SLACK = HZ / 250, /* 1 tick slack w/ the popular 250HZ config */
+ IRQ_POLL_QUICK_SLACK = HZ / 10000, /* no slack, basically */
+
+ /*
+ * IRQ expect parameters.
+ *
+ * Because IRQ expecting is tightly coupled with the actual
+ * activity of the controller, we can be slightly aggressive
+ * and try to minimize the effect of lost interrupts.
+ *
+ * An irqaction must accumulate VERIFY_GOAL good deliveries,
+ * where one bad delivery (delivered by polling) costs
+ * BAD_FACTOR good ones, before reaching the verified state.
+ *
+ * QUICK_SAMPLES IRQ deliveries are examined and if
+ * >=QUICK_THRESHOLD of them are polled on the first poll, the
+ * IRQ is considered to be quick and QUICK_INTV is used
+ * instead.
+ *
+ * Keep QUICK_SAMPLES much higher than VERIFY_GOAL so that
+ * quick polling doesn't interfere with the initial
+ * verification attempt (quicker polling increases the chance
+ * of polled deliveries).
+ */
+ IRQ_EXP_BAD_FACTOR = 10,
+ IRQ_EXP_VERIFY_GOAL = 256,
+ IRQ_EXP_QUICK_SAMPLES = IRQ_EXP_VERIFY_GOAL * 4,
+ IRQ_EXP_QUICK_THRESHOLD = IRQ_EXP_QUICK_SAMPLES * 8 / 10,
+
+ /* IRQ expect flags */
+ IRQ_EXPECTING = (1 << 0), /* expecting in progress */
+ IRQ_EXP_VERIFIED = (1 << 1), /* delivery verified, use slow interval */
+ IRQ_EXP_QUICK = (1 << 2), /* quick polling enabled */
+ IRQ_EXP_WARNED = (1 << 3), /* already whined */
+
+ /*
+ * IRQ watch parameters.
+ *
+ * As IRQ watching has much less information about what's
+ * going on, the parameters are more conservative. It will
+ * terminate unless it can reliably determine that IRQ
+ * delivery isn't working.
+ *
+ * IRQs are watched in timed intervals which is BASE_PERIOD
+ * long by default. Polling interval starts at BASE_INTV and
+ * grows upto SLOW_INTV if no bad delivery is detected.
+ *
+ * If a period contains zero sample and no bad delivery was
+ * seen since watch started, watch terminates.
+ *
+ * If a period contains >=1 but <MIN_SAMPLES deliveries,
+ * collected samples are inherited to the next period.
+ *
+ * If it contains enough samples, the ratio between good and
+ * bad deliveries are examined, if >=BAD_PCT% are bad, the
+ * irqaction is tagged bad and watched indefinitely. if
+ * BAD_PCT% > nr_bad >= WARY_PCT%, WARY_PERIOD is used instead
+ * of BASE_PERIOD and the whole process is restarted. If
+ * <WARY_PCT% are bad, watch terminates.
+ */
+ IRQ_WAT_MIN_SAMPLES = 10,
+ IRQ_WAT_BASE_INTV = HZ / 2,
+ IRQ_WAT_BASE_PERIOD = 60 * HZ,
+ IRQ_WAT_WARY_PERIOD = 600 * HZ,
+ IRQ_WAT_WARY_PCT = 1,
+ IRQ_WAT_BAD_PCT = 10,
+
+ /* IRQ watch flags */
+ IRQ_WATCHING = (1 << 0),
+ IRQ_WAT_POLLED = (1 << 1),
+ IRQ_WAT_WARY = (1 << 2),
+ IRQ_WAT_BAD = (1 << 3),
+
+ /*
+ * Spurious IRQ handling parameters.
+ *
+ * As this per-IRQ spurious handling is cheaper than the
+ * previous system wide spurious handling, it can afford to
+ * use more responsive settings but these parameters are still
+ * pretty conservative. If ever necessary, making it more
+ * responsive shouldn't cause any problem.
+ *
+ * Spurious IRQs are monitored in segments of PERIOD_SAMPLES
+ * IRQs which can stretch PERIOD_DURATION at maximum. If
+ * there are less than PERIOD_SAMPLES IRQs per
+ * PERIOD_DURATION, the period is considered good.
+ *
+ * If >=BAD_THRESHOLD IRQs are bad ones, the period is
+ * considered bad and spurious IRQ handling kicks in - the IRQ
+ * is disabled and polled. The IRQ is given another shot
+ * after a certain number of IRQs are handled, which is at minimum
+ * POLL_CNT_MIN, increased by 1 << POLL_CNT_INC_SHIFT times
+ * after each bad period and decreased by factor of
+ * POLL_CNT_INC_DEC_SHIFT after each good one.
+ */
+ IRQ_SPR_PERIOD_DURATION = 10 * HZ,
+ IRQ_SPR_PERIOD_SAMPLES = 10000,
+ IRQ_SPR_BAD_THRESHOLD = 9900,
+ IRQ_SPR_POLL_CNT_MIN = 10000,
+ IRQ_SPR_POLL_CNT_INF = UINT_MAX,
+ IRQ_SPR_POLL_CNT_INC_SHIFT = 3,
+ IRQ_SPR_POLL_CNT_DEC_SHIFT = 1,
+ IRQ_SPR_POLL_CNT_MAX_DEC_SHIFT = BITS_PER_BYTE * sizeof(int) / 4,
+};
+
+struct irq_expect {
+ struct irq_expect *next;
+ struct irq_desc *desc; /* the associated IRQ desc */
+ struct irqaction *act; /* the associated IRQ action */
+
+ unsigned int flags; /* IRQ_EXP_* flags */
+ unsigned int nr_samples; /* nr of collected samples in this period */
+ unsigned int nr_quick; /* nr of polls completed after single attempt */
+ unsigned int nr_good; /* nr of good IRQ deliveries */
+ unsigned long started; /* when this period started */
+};
+
+int noirqdebug __read_mostly;
+static int irqfixup __read_mostly = IRQFIXUP_SPURIOUS;
+
+static struct irqaction *find_irq_action(struct irq_desc *desc, void *dev_id)
+{
+ struct irqaction *act;
+
+ for (act = desc->action; act; act = act->next)
+ if (act->dev_id == dev_id)
+ return act;
+ return NULL;
+}
+
+static void print_irq_handlers(struct irq_desc *desc)
+{
+ struct irqaction *action;
+
+ printk(KERN_ERR "handlers:\n");
+
+ action = desc->action;
+ while (action) {
+ printk(KERN_ERR "[<%p>]", action->handler);
+ print_symbol(" (%s)", (unsigned long)action->handler);
+ printk("\n");
+ action = action->next;
+ }
+}
+
+static void warn_irq_poll(struct irq_desc *desc, struct irqaction *act)
+{
+ if (desc->poll_warned)
+ return;
+
+ desc->poll_warned = true;
+
+ printk(KERN_WARNING "IRQ %u: %s: can't verify IRQ, will keep polling\n",
+ desc->irq, act->name);
+ printk(KERN_WARNING "IRQ %u: %s: system performance may be affected\n",
+ desc->irq, act->name);
+}
+
+static unsigned long irq_poll_slack(unsigned long intv)
+{
+ if (intv >= IRQ_POLL_SLOW_INTV)
+ return IRQ_POLL_SLOW_SLACK;
+ else if (intv >= IRQ_POLL_INTV)
+ return IRQ_POLL_SLACK;
+ else
+ return IRQ_POLL_QUICK_SLACK;
+}
+
+/**
+ * irq_schedule_poll - schedule IRQ poll
+ * @desc: IRQ desc to schedule poll for
+ * @intv: poll interval
+ *
+ * Schedules @desc->poll_timer. If the timer is already scheduled,
+ * it's modified iff jiffies + @intv + slack is before the timer's
+ * expires. poll_timers aren't taken offline behind this function's
+ * back and the users of this function are guaranteed that poll_irq()
+ * will be called at or before jiffies + @intv + slack.
+ *
+ * CONTEXT:
+ * desc->lock
+ */
+static void irq_schedule_poll(struct irq_desc *desc, unsigned long intv)
+{
+ unsigned long expires = jiffies + intv;
+ int slack = irq_poll_slack(intv);
-static int irqfixup __read_mostly;
+ if (timer_pending(&desc->poll_timer) &&
+ time_before_eq(desc->poll_timer.expires, expires + slack))
+ return;
+
+ set_timer_slack(&desc->poll_timer, slack);
+ mod_timer(&desc->poll_timer, expires);
+}
+
+static unsigned long irq_exp_intv(struct irq_expect *exp)
+{
+ if (!(exp->flags & IRQ_EXPECTING))
+ return MAX_JIFFY_OFFSET;
+ if (exp->flags & IRQ_EXP_VERIFIED)
+ return IRQ_POLL_SLOW_INTV;
+ if (exp->flags & IRQ_EXP_QUICK)
+ return IRQ_POLL_QUICK_INTV;
+ return IRQ_POLL_INTV;
+}
+
+/**
+ * init_irq_expect - initialize IRQ expecting
+ * @irq: IRQ to expect
+ * @dev_id: dev_id of the irqaction to expect
+ *
+ * Initializes IRQ expecting and returns expect token to use. This
+ * function can be called multiple times for the same irqaction and
+ * each token can be used independently.
+ *
+ * CONTEXT:
+ * Does GFP_KERNEL allocation.
+ *
+ * RETURNS:
+ * irq_expect token to use on success, %NULL on failure.
+ */
+struct irq_expect *init_irq_expect(unsigned int irq, void *dev_id)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+ struct irqaction *act;
+ struct irq_expect *exp;
+ unsigned long flags;
+
+ if (noirqdebug || WARN_ON_ONCE(!desc))
+ return NULL;
+
+ exp = kzalloc(sizeof(*exp), GFP_KERNEL);
+ if (!exp) {
+ printk(KERN_WARNING "IRQ %u: failed to initialize IRQ expect, "
+ "allocation failed\n", irq);
+ return NULL;
+ }
+
+ exp->desc = desc;
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+
+ act = find_irq_action(desc, dev_id);
+ if (!WARN_ON_ONCE(!act)) {
+ exp->act = act;
+ exp->next = act->expects;
+ act->expects = exp;
+ } else {
+ kfree(exp);
+ exp = NULL;
+ }
+
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+ return exp;
+}
+EXPORT_SYMBOL_GPL(init_irq_expect);
+
+/**
+ * expect_irq - expect IRQ
+ * @exp: expect token acquired from init_irq_expect(), %NULL is allowed
+ *
+ * Tell IRQ subsystem to expect an IRQ. The IRQ might be polled until
+ * unexpect_irq() is called on @exp. If @exp is %NULL, this function
+ * becomes noop.
+ *
+ * This function is fairly cheap and drivers can call it for each
+ * interrupt driven operation without adding noticeable overhead in
+ * most cases.
+ *
+ * CONTEXT:
+ * Don't care. The caller is responsible for ensuring
+ * [un]expect_irq() calls don't overlap. Overlapping may lead to
+ * unexpected polling behaviors but won't directly cause a failure.
+ */
+void expect_irq(struct irq_expect *exp)
+{
+ struct irq_desc *desc;
+ unsigned long intv, deadline;
+ unsigned long flags;
+
+ /* @exp is NULL if noirqdebug */
+ if (unlikely(!exp))
+ return;
+
+ desc = exp->desc;
+ exp->flags |= IRQ_EXPECTING;
-#define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10)
-static void poll_spurious_irqs(unsigned long dummy);
-static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs, 0, 0);
+ /*
+ * Paired with mb in poll_irq(). Either we see timer pending
+ * cleared or poll_irq() sees IRQ_EXPECTING.
+ */
+ smp_mb();
+
+ exp->started = jiffies;
+ intv = irq_exp_intv(exp);
+ deadline = exp->started + intv + irq_poll_slack(intv);
+
+ /*
+ * poll_timer is never explicitly killed unless there's no
+ * action left on the irq; also, while it's online, timer
+ * duration is only shortened, which means that if we see
+ * ->expires in the future and not later than our deadline,
+ * the timer is guaranteed to fire before it.
+ */
+ if (!timer_pending(&desc->poll_timer) ||
+ time_after_eq(jiffies, desc->poll_timer.expires) ||
+ time_before(deadline, desc->poll_timer.expires)) {
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ irq_schedule_poll(desc, intv);
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+ }
+}
+EXPORT_SYMBOL_GPL(expect_irq);
+
+/**
+ * unexpect_irq - unexpect IRQ
+ * @exp: expect token acquired from init_irq_expect(), %NULL is allowed
+ * @timedout: did the IRQ timeout?
+ *
+ * Tell IRQ subsystem to stop expecting an IRQ. Set @timedout to
+ * %true if the expected IRQ never arrived. If @exp is %NULL, this
+ * function becomes a no-op.
+ *
+ * This function is fairly cheap and drivers can call it for each
+ * interrupt driven operation without adding noticeable overhead in
+ * most cases.
+ *
+ * CONTEXT:
+ * Don't care. The caller is responsible for ensuring
+ * [un]expect_irq() calls don't overlap. Overlapping may lead to
+ * unexpected polling behaviors but won't directly cause a failure.
+ */
+void unexpect_irq(struct irq_expect *exp, bool timedout)
+{
+ struct irq_desc *desc;
+
+ /* @exp is NULL if noirqdebug */
+ if (unlikely(!exp) || (!(exp->flags & IRQ_EXPECTING) && !timedout))
+ return;
+
+ desc = exp->desc;
+ exp->flags &= ~IRQ_EXPECTING;
+
+ /* successful completion from IRQ? */
+ if (likely(!(desc->status & IRQ_IN_POLLING) && !timedout)) {
+ /*
+ * IRQ seems a bit more trustworthy. Allow nr_good to
+ * increase till VERIFY_GOAL + BAD_FACTOR - 1 so that
+ * single successful delivery can recover verified
+ * state after an accidental polling hit.
+ */
+ if (unlikely(exp->nr_good <
+ IRQ_EXP_VERIFY_GOAL + IRQ_EXP_BAD_FACTOR - 1) &&
+ ++exp->nr_good >= IRQ_EXP_VERIFY_GOAL) {
+ exp->flags |= IRQ_EXP_VERIFIED;
+ exp->nr_samples = 0;
+ exp->nr_quick = 0;
+ }
+ return;
+ }
+
+ /* timedout or polled */
+ if (timedout) {
+ exp->nr_good = 0;
+ } else {
+ exp->nr_good -= min_t(unsigned int,
+ exp->nr_good, IRQ_EXP_BAD_FACTOR);
+
+ if (time_before_eq(jiffies, exp->started + IRQ_POLL_INTV))
+ exp->nr_quick++;
+
+ if (++exp->nr_samples >= IRQ_EXP_QUICK_SAMPLES) {
+ /*
+ * Use quick sampling checkpoints as warning
+ * checkpoints too.
+ */
+ if (!(exp->flags & IRQ_EXP_WARNED) &&
+ !desc->spr.poll_rem) {
+ warn_irq_poll(desc, exp->act);
+ exp->flags |= IRQ_EXP_WARNED;
+ }
+
+ exp->flags &= ~IRQ_EXP_QUICK;
+ if (exp->nr_quick >= IRQ_EXP_QUICK_THRESHOLD)
+ exp->flags |= IRQ_EXP_QUICK;
+ exp->nr_samples = 0;
+ exp->nr_quick = 0;
+ }
+ }
+
+ exp->flags &= ~IRQ_EXP_VERIFIED;
+}
+EXPORT_SYMBOL_GPL(unexpect_irq);
+
+/**
+ * irq_update_watch - IRQ handled, update watch state
+ * @desc: IRQ desc of interest
+ * @act: IRQ action of interest
+ * @via_poll: IRQ was handled via poll
+ *
+ * Called after IRQ is successfully delivered or polled. Updates
+ * watch state accordingly and determines which watch interval to use.
+ *
+ * CONTEXT:
+ * desc->lock
+ *
+ * RETURNS:
+ * Watch poll interval to use, MAX_JIFFY_OFFSET if watch polling isn't
+ * necessary.
+ */
+static unsigned long irq_update_watch(struct irq_desc *desc,
+ struct irqaction *act, bool via_poll)
+{
+ struct irq_watch *wat = &act->watch;
+ unsigned long period = wat->flags & IRQ_WAT_WARY ?
+ IRQ_WAT_WARY_PERIOD : IRQ_WAT_BASE_PERIOD;
+
+ /* if not watching or already determined to be bad, it's easy */
+ if (!(wat->flags & IRQ_WATCHING))
+ return MAX_JIFFY_OFFSET;
+ if (wat->flags & IRQ_WAT_BAD)
+ return IRQ_POLL_INTV;
+
+ /* don't expire watch period while spurious polling is in effect */
+ if (desc->spr.poll_rem) {
+ wat->started = jiffies;
+ return IRQ_POLL_INTV;
+ }
+
+ /* IRQ was handled, record whether it was a good or bad delivery */
+ if (wat->last_ret == IRQ_HANDLED) {
+ wat->nr_samples++;
+ if (via_poll) {
+ wat->nr_polled++;
+ wat->flags |= IRQ_WAT_POLLED;
+ }
+ }
+
+ /* is this watch period over? */
+ if (time_after(jiffies, wat->started + period)) {
+ unsigned int wry_thr = wat->nr_samples * IRQ_WAT_WARY_PCT / 100;
+ unsigned int bad_thr = wat->nr_samples * IRQ_WAT_BAD_PCT / 100;
+
+ if (wat->nr_samples >= IRQ_WAT_MIN_SAMPLES) {
+ /* have enough samples, determine what to do */
+ if (wat->nr_polled <= wry_thr)
+ wat->flags &= ~IRQ_WATCHING;
+ else if (wat->nr_polled <= bad_thr)
+ wat->flags |= IRQ_WAT_WARY;
+ else {
+ warn_irq_poll(desc, act);
+ wat->flags |= IRQ_WAT_BAD;
+ }
+ wat->nr_samples = 0;
+ wat->nr_polled = 0;
+ } else if (!wat->nr_samples || !(wat->flags & IRQ_WAT_POLLED)) {
+ /* not sure but let's not hold onto it */
+ wat->flags &= ~IRQ_WATCHING;
+ }
+
+ wat->started = jiffies;
+ }
+
+ if (!(wat->flags & IRQ_WATCHING))
+ return MAX_JIFFY_OFFSET;
+ if (wat->flags & IRQ_WAT_POLLED)
+ return IRQ_POLL_INTV;
+ /* every delivery up to this point has been successful, grow interval */
+ return clamp_t(unsigned long, jiffies - wat->started,
+ IRQ_WAT_BASE_INTV, IRQ_POLL_SLOW_INTV);
+}
+
+/**
+ * watch_irq - watch an irqaction
+ * @irq: IRQ the irqaction to watch belongs to
+ * @dev_id: dev_id for the irqaction to watch
+ *
+ * LOCKING:
+ * Grabs and releases desc->lock.
+ */
+void watch_irq(unsigned int irq, void *dev_id)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+ struct irqaction *act;
+ unsigned long flags;
+
+ if (WARN_ON_ONCE(!desc))
+ return;
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+
+ act = find_irq_action(desc, dev_id);
+ if (!WARN_ON_ONCE(!act)) {
+ struct irq_watch *wat = &act->watch;
+
+ wat->flags |= IRQ_WATCHING;
+ wat->started = jiffies;
+ wat->nr_samples = 0;
+ wat->nr_polled = 0;
+ desc->status |= IRQ_CHECK_WATCHES;
+ irq_schedule_poll(desc, IRQ_WAT_BASE_INTV);
+ }
+
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+}
+EXPORT_SYMBOL_GPL(watch_irq);
+
+/* start a new spurious handling period */
+static void irq_spr_new_period(struct irq_spr *spr)
+{
+ spr->period_start = jiffies;
+ spr->nr_samples = 0;
+ spr->nr_bad = 0;
+}
+
+/* Reset spurious handling. After this, poll_timer will offline itself soon. */
+static void irq_spr_reset(struct irq_spr *spr)
+{
+ irq_spr_new_period(spr);
+ spr->poll_cnt = IRQ_SPR_POLL_CNT_MIN;
+ spr->poll_rem = 0;
+}
/*
- * Recovery handler for misrouted interrupts.
+ * Perform an actual poll.
*/
static int try_one_irq(int irq, struct irq_desc *desc)
{
struct irqaction *action;
int ok = 0, work = 0;
- raw_spin_lock(&desc->lock);
/* Already running on another processor */
if (desc->status & IRQ_INPROGRESS) {
/*
@@ -37,7 +641,6 @@ static int try_one_irq(int irq, struct irq_desc *desc)
*/
if (desc->action && (desc->action->flags & IRQF_SHARED))
desc->status |= IRQ_PENDING;
- raw_spin_unlock(&desc->lock);
return ok;
}
/* Honour the normal IRQ locking */
@@ -48,8 +651,9 @@ static int try_one_irq(int irq, struct irq_desc *desc)
while (action) {
/* Only shared IRQ handlers are safe to call */
if (action->flags & IRQF_SHARED) {
- if (action->handler(irq, action->dev_id) ==
- IRQ_HANDLED)
+ action->watch.last_ret =
+ action->handler(irq, action->dev_id);
+ if (action->watch.last_ret == IRQ_HANDLED)
ok = 1;
}
action = action->next;
@@ -80,7 +684,6 @@ static int try_one_irq(int irq, struct irq_desc *desc)
*/
if (work && desc->chip && desc->chip->end)
desc->chip->end(irq);
- raw_spin_unlock(&desc->lock);
return ok;
}
@@ -97,172 +700,275 @@ static int misrouted_irq(int irq)
if (i == irq) /* Already tried */
continue;
+ raw_spin_lock(&desc->lock);
if (try_one_irq(i, desc))
ok = 1;
+ raw_spin_unlock(&desc->lock);
}
/* So the caller can adjust the irq error counts */
return ok;
}
-static void poll_spurious_irqs(unsigned long dummy)
+/*
+ * IRQ delivery notification function. Called after each IRQ delivery.
+ */
+void __note_interrupt(unsigned int irq, struct irq_desc *desc,
+ irqreturn_t action_ret)
{
- struct irq_desc *desc;
- int i;
+ struct irq_spr *spr = &desc->spr;
+ unsigned long dur;
+ unsigned int cnt, abbr;
+ char unit = 'k';
- for_each_irq_desc(i, desc) {
- unsigned int status;
+ /* first, take care of IRQ watches */
+ if (unlikely(desc->status & IRQ_CHECK_WATCHES)) {
+ unsigned long intv = MAX_JIFFY_OFFSET;
+ struct irqaction *act;
- if (!i)
- continue;
+ raw_spin_lock(&desc->lock);
- /* Racy but it doesn't matter */
- status = desc->status;
- barrier();
- if (!(status & IRQ_SPURIOUS_DISABLED))
- continue;
+ for (act = desc->action; act; act = act->next)
+ intv = min(intv, irq_update_watch(desc, act, false));
- local_irq_disable();
- try_one_irq(i, desc);
- local_irq_enable();
+ if (intv < MAX_JIFFY_OFFSET)
+ irq_schedule_poll(desc, intv);
+ else
+ desc->status &= ~IRQ_CHECK_WATCHES;
+
+ raw_spin_unlock(&desc->lock);
+ }
+
+ /*
+ * Account for unhandled interrupt. We don't care whether
+ * spurious accounting update races with irq open/close and
+ * gets some values wrong. Do it w/o locking.
+ */
+ if (unlikely(action_ret != IRQ_HANDLED)) {
+ static int bogus_count = 100;
+
+ spr->last_bad = jiffies - INITIAL_JIFFIES;
+ spr->nr_bad++;
+ if (likely(action_ret == IRQ_NONE)) {
+ if (unlikely(irqfixup >= IRQFIXUP_MISROUTED &&
+ misrouted_irq(irq)))
+ spr->nr_bad--;
+ } else if (bogus_count > 0) {
+ bogus_count--;
+ printk(KERN_ERR "IRQ %u: bogus return value %x\n",
+ irq, action_ret);
+ dump_stack();
+ print_irq_handlers(desc);
+ }
}
- mod_timer(&poll_spurious_irq_timer,
- jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
+ /* did we finish this spurious period? */
+ spr->nr_samples++;
+ if (likely(spr->nr_samples < IRQ_SPR_PERIOD_SAMPLES))
+ return;
+
+ /* if so, was it a good one? */
+ dur = jiffies - spr->period_start;
+ if (likely(spr->nr_bad < IRQ_SPR_BAD_THRESHOLD ||
+ dur > IRQ_SPR_PERIOD_DURATION)) {
+ /*
+ * If longer than PERIOD_DURATION has passed, consider
+ * multiple good periods have happened.
+ */
+ int sft = IRQ_SPR_POLL_CNT_DEC_SHIFT *
+ (dur >> order_base_2(IRQ_SPR_PERIOD_DURATION));
+
+ /* but don't kill poll_cnt at once */
+ sft = clamp(sft, 1, IRQ_SPR_POLL_CNT_MAX_DEC_SHIFT);
+
+ spr->poll_cnt >>= sft;
+ irq_spr_new_period(spr);
+ return;
+ }
+
+ /*
+ * It was a bad one, start polling. This is a slow path and
+ * we're gonna be changing states which require proper
+ * synchronization, grab desc->lock.
+ */
+ raw_spin_lock(&desc->lock);
+
+ irq_spr_new_period(spr);
+
+ /* update spr_poll_cnt considering the lower and upper bounds */
+ cnt = max_t(unsigned int, spr->poll_cnt, IRQ_SPR_POLL_CNT_MIN);
+ spr->poll_cnt = cnt << IRQ_SPR_POLL_CNT_INC_SHIFT;
+ if (spr->poll_cnt < cnt) /* did it overflow? */
+ spr->poll_cnt = IRQ_SPR_POLL_CNT_INF;
+
+ /* whine, plug IRQ and kick poll timer */
+ abbr = cnt / 1000;
+ if (abbr > 1000) {
+ abbr /= 1000;
+ unit = 'm';
+ }
+ printk(KERN_ERR "IRQ %u: too many spurious IRQs, disabling and "
+ "polling for %u%c %umsec intervals.\n",
+ desc->irq, abbr, unit, jiffies_to_msecs(IRQ_POLL_INTV));
+ printk(KERN_ERR "IRQ %u: system performance may be affected\n",
+ desc->irq);
+ print_irq_handlers(desc);
+
+ desc->status |= IRQ_DISABLED | IRQ_SPURIOUS_DISABLED;
+ desc->depth++;
+ desc->chip->disable(desc->irq);
+
+ spr->poll_rem = cnt;
+ irq_schedule_poll(desc, IRQ_POLL_INTV);
+
+ raw_spin_unlock(&desc->lock);
}
/*
- * If 99,900 of the previous 100,000 interrupts have not been handled
- * then assume that the IRQ is stuck in some manner. Drop a diagnostic
- * and try to turn the IRQ off.
- *
- * (The other 100-of-100,000 interrupts may have been a correctly
- * functioning device sharing an IRQ with the failing one)
- *
- * Called under desc->lock
+ * IRQ poller. Called from desc->poll_timer.
*/
-
-static void
-__report_bad_irq(unsigned int irq, struct irq_desc *desc,
- irqreturn_t action_ret)
+void poll_irq(unsigned long arg)
{
- struct irqaction *action;
+ struct irq_desc *desc = (void *)arg;
+ struct irq_spr *spr = &desc->spr;
+ unsigned long intv = MAX_JIFFY_OFFSET;
+ bool reenable_irq = false;
+ struct irqaction *act;
+ struct irq_expect *exp;
+
+ raw_spin_lock_irq(&desc->lock);
+
+ /* poll the IRQ */
+ desc->status |= IRQ_IN_POLLING;
+ try_one_irq(desc->irq, desc);
+ desc->status &= ~IRQ_IN_POLLING;
+
+ /* take care of spurious handling */
+ if (spr->poll_rem) {
+ if (spr->poll_rem != IRQ_SPR_POLL_CNT_INF)
+ spr->poll_rem--;
+ if (spr->poll_rem)
+ intv = IRQ_POLL_INTV;
+ else
+ irq_spr_new_period(spr);
+ }
+ if (!spr->poll_rem)
+ reenable_irq = desc->status & IRQ_SPURIOUS_DISABLED;
- if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
- printk(KERN_ERR "irq event %d: bogus return value %x\n",
- irq, action_ret);
- } else {
- printk(KERN_ERR "irq %d: nobody cared (try booting with "
- "the \"irqpoll\" option)\n", irq);
+ /*
+ * Paired with mb in expect_irq() so that either they see
+ * timer pending cleared or irq_exp_intv() below sees
+ * IRQ_EXPECTING.
+ */
+ smp_mb();
+
+ /* take care of expects and watches */
+ for (act = desc->action; act; act = act->next) {
+ intv = min(irq_update_watch(desc, act, true), intv);
+ for (exp = act->expects; exp; exp = exp->next)
+ intv = min(irq_exp_intv(exp), intv);
}
- dump_stack();
- printk(KERN_ERR "handlers:\n");
- action = desc->action;
- while (action) {
- printk(KERN_ERR "[<%p>]", action->handler);
- print_symbol(" (%s)",
- (unsigned long)action->handler);
- printk("\n");
- action = action->next;
+ /* need to poll again? */
+ if (intv < MAX_JIFFY_OFFSET)
+ irq_schedule_poll(desc, intv);
+
+ raw_spin_unlock_irq(&desc->lock);
+
+ if (!reenable_irq)
+ return;
+
+ /* need to do locking dance for chip_bus_lock() to reenable IRQ */
+ chip_bus_lock(desc->irq, desc);
+ raw_spin_lock_irq(&desc->lock);
+
+ /* make sure we haven't raced with anyone in between */
+ if (!spr->poll_rem && (desc->status & IRQ_SPURIOUS_DISABLED)) {
+ printk(KERN_INFO "IRQ %u: spurious polling finished, "
+ "reenabling IRQ\n", desc->irq);
+ __enable_irq(desc, desc->irq, false);
+ desc->status &= ~IRQ_SPURIOUS_DISABLED;
}
+
+ raw_spin_unlock_irq(&desc->lock);
+ chip_bus_sync_unlock(desc->irq, desc);
}
-static void
-report_bad_irq(unsigned int irq, struct irq_desc *desc, irqreturn_t action_ret)
+void irq_poll_action_added(struct irq_desc *desc, struct irqaction *action)
{
- static int count = 100;
+ struct irq_spr *spr = &desc->spr;
+ unsigned long flags;
- if (count > 0) {
- count--;
- __report_bad_irq(irq, desc, action_ret);
+ raw_spin_lock_irqsave(&desc->lock, flags);
+
+ if ((action->flags & IRQF_SHARED) && irqfixup >= IRQFIXUP_POLL) {
+ if (!spr->poll_rem)
+ printk(KERN_INFO "IRQ %u: starting IRQFIXUP_POLL\n",
+ desc->irq);
+ spr->poll_rem = IRQ_SPR_POLL_CNT_INF;
+ irq_schedule_poll(desc, IRQ_POLL_INTV);
+ } else {
+ /* new irqaction registered, give the IRQ another chance */
+ irq_spr_reset(spr);
}
+
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
}
-static inline int
-try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
- irqreturn_t action_ret)
+void irq_poll_action_removed(struct irq_desc *desc, struct irqaction *action)
{
- struct irqaction *action;
+ bool irq_enabled = false, timer_killed = false;
+ struct irq_expect *exp, *next;
+ unsigned long flags;
+ int rc;
- if (!irqfixup)
- return 0;
+ raw_spin_lock_irqsave(&desc->lock, flags);
- /* We didn't actually handle the IRQ - see if it was misrouted? */
- if (action_ret == IRQ_NONE)
- return 1;
+ /* give the IRQ another chance */
+ if (irqfixup < IRQFIXUP_POLL)
+ irq_spr_reset(&desc->spr);
/*
- * But for 'irqfixup == 2' we also do it for handled interrupts if
- * they are marked as IRQF_IRQPOLL (or for irq zero, which is the
- * traditional PC timer interrupt.. Legacy)
+ * Make sure the timer is offline if no irqaction is left as
+ * the irq_desc will be reinitialized when the next irqaction
+ * is added; otherwise, the timer can be left alone. It will
+ * offline itself if no longer necessary.
*/
- if (irqfixup < 2)
- return 0;
-
- if (!irq)
- return 1;
+ while (!desc->action) {
+ rc = try_to_del_timer_sync(&desc->poll_timer);
+ if (rc >= 0) {
+ timer_killed = rc > 0;
+ break;
+ }
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+ cpu_relax();
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ }
/*
- * Since we don't get the descriptor lock, "action" can
- * change under us. We don't really care, but we don't
- * want to follow a NULL pointer. So tell the compiler to
- * just load it once by using a barrier.
+ * If the timer was forcefully shut down, it might not have
+ * had the chance to reenable IRQ. Make sure it's enabled.
*/
- action = desc->action;
- barrier();
- return action && (action->flags & IRQF_IRQPOLL);
-}
-
-void note_interrupt(unsigned int irq, struct irq_desc *desc,
- irqreturn_t action_ret)
-{
- if (unlikely(action_ret != IRQ_HANDLED)) {
- /*
- * If we are seeing only the odd spurious IRQ caused by
- * bus asynchronicity then don't eventually trigger an error,
- * otherwise the counter becomes a doomsday timer for otherwise
- * working systems
- */
- if (time_after(jiffies, desc->last_unhandled + HZ/10))
- desc->irqs_unhandled = 1;
- else
- desc->irqs_unhandled++;
- desc->last_unhandled = jiffies;
- if (unlikely(action_ret != IRQ_NONE))
- report_bad_irq(irq, desc, action_ret);
+ if (timer_killed && (desc->status & IRQ_SPURIOUS_DISABLED)) {
+ __enable_irq(desc, desc->irq, false);
+ desc->status &= ~IRQ_SPURIOUS_DISABLED;
+ irq_enabled = true;
}
- if (unlikely(try_misrouted_irq(irq, desc, action_ret))) {
- int ok = misrouted_irq(irq);
- if (action_ret == IRQ_NONE)
- desc->irqs_unhandled -= ok;
- }
-
- desc->irq_count++;
- if (likely(desc->irq_count < 100000))
- return;
+ if (timer_killed || irq_enabled)
+ printk(KERN_INFO "IRQ %u:%s%s%s\n", desc->irq,
+ timer_killed ? " polling stopped" : "",
+ timer_killed && irq_enabled ? " and" : "",
+ irq_enabled ? " IRQ reenabled" : "");
- desc->irq_count = 0;
- if (unlikely(desc->irqs_unhandled > 99900)) {
- /*
- * The interrupt is stuck
- */
- __report_bad_irq(irq, desc, action_ret);
- /*
- * Now kill the IRQ
- */
- printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
- desc->status |= IRQ_DISABLED | IRQ_SPURIOUS_DISABLED;
- desc->depth++;
- desc->chip->disable(irq);
-
- mod_timer(&poll_spurious_irq_timer,
- jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
+ /* free expect tokens */
+ for (exp = action->expects; exp; exp = next) {
+ next = exp->next;
+ kfree(exp);
}
- desc->irqs_unhandled = 0;
-}
+ action->expects = NULL;
-int noirqdebug __read_mostly;
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+}
int noirqdebug_setup(char *str)
{
@@ -278,7 +984,7 @@ MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true");
static int __init irqfixup_setup(char *str)
{
- irqfixup = 1;
+ irqfixup = max(irqfixup, IRQFIXUP_MISROUTED);
printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
printk(KERN_WARNING "This may impact system performance.\n");
@@ -290,7 +996,7 @@ module_param(irqfixup, int, 0644);
static int __init irqpoll_setup(char *str)
{
- irqfixup = 2;
+ irqfixup = IRQFIXUP_POLL;
printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
"enabled\n");
printk(KERN_WARNING "This may significantly impact system "