-rw-r--r-- Documentation/ABI/testing/sysfs-block | 14
-rw-r--r-- Documentation/block/biodoc.txt | 5
-rw-r--r-- Documentation/feature-removal-schedule.txt | 2
-rw-r--r-- Documentation/percpu-rw-semaphore.txt | 27
-rw-r--r-- Makefile | 2
-rw-r--r-- arch/arm/boot/dts/at91sam9g25ek.dts | 2
-rw-r--r-- arch/arm/configs/armadillo800eva_defconfig | 2
-rw-r--r-- arch/arm/mach-at91/at91rm9200_time.c | 2
-rw-r--r-- arch/arm/mach-at91/at91sam9260_devices.c | 6
-rw-r--r-- arch/arm/mach-at91/at91sam9261_devices.c | 6
-rw-r--r-- arch/arm/mach-at91/at91sam9263_devices.c | 10
-rw-r--r-- arch/arm/mach-at91/at91sam9g45_devices.c | 6
-rw-r--r-- arch/arm/mach-at91/at91sam9rl_devices.c | 6
-rw-r--r-- arch/arm/mach-at91/clock.c | 12
-rw-r--r-- arch/arm/mach-gemini/irq.c | 1
-rw-r--r-- arch/arm/mach-kirkwood/db88f6281-bp-setup.c | 1
-rw-r--r-- arch/arm/mach-shmobile/board-armadillo800eva.c | 13
-rw-r--r-- arch/arm/mach-shmobile/board-mackerel.c | 3
-rw-r--r-- arch/arm/mach-shmobile/board-marzen.c | 2
-rw-r--r-- arch/arm/mach-shmobile/intc-sh73a0.c | 4
-rw-r--r-- arch/parisc/include/asm/atomic.h | 4
-rw-r--r-- arch/parisc/kernel/process.c | 2
-rw-r--r-- arch/parisc/kernel/sys_parisc.c | 8
-rw-r--r-- arch/powerpc/include/asm/processor.h | 1
-rw-r--r-- arch/powerpc/kernel/asm-offsets.c | 1
-rw-r--r-- arch/powerpc/kernel/dbell.c | 2
-rw-r--r-- arch/powerpc/kernel/entry_64.S | 23
-rw-r--r-- arch/powerpc/kernel/exceptions-64s.S | 3
-rw-r--r-- arch/powerpc/kernel/idle_power7.S | 2
-rw-r--r-- arch/powerpc/kernel/process.c | 12
-rw-r--r-- arch/powerpc/kernel/smp.c | 11
-rw-r--r-- arch/powerpc/kernel/sysfs.c | 10
-rw-r--r-- arch/powerpc/kernel/time.c | 9
-rw-r--r-- arch/powerpc/kernel/traps.c | 3
-rw-r--r-- arch/powerpc/lib/code-patching.c | 2
-rw-r--r-- arch/powerpc/mm/numa.c | 7
-rw-r--r-- arch/powerpc/platforms/powernv/smp.c | 10
-rw-r--r-- arch/powerpc/sysdev/xics/icp-hv.c | 6
-rw-r--r-- arch/s390/include/asm/elf.h | 3
-rw-r--r-- arch/s390/include/asm/posix_types.h | 3
-rw-r--r-- arch/s390/include/asm/smp.h | 1
-rw-r--r-- arch/um/os-Linux/time.c | 2
-rw-r--r-- arch/x86/kvm/x86.c | 3
-rw-r--r-- block/blk-core.c | 51
-rw-r--r-- block/blk-lib.c | 104
-rw-r--r-- block/blk-merge.c | 53
-rw-r--r-- block/blk-settings.c | 16
-rw-r--r-- block/blk-sysfs.c | 44
-rw-r--r-- block/blk-tag.c | 6
-rw-r--r-- block/blk.h | 5
-rw-r--r-- block/elevator.c | 6
-rw-r--r-- block/ioctl.c | 27
-rw-r--r-- drivers/block/Kconfig | 1
-rw-r--r-- drivers/block/cciss.c | 1
-rw-r--r-- drivers/block/drbd/drbd_actlog.c | 19
-rw-r--r-- drivers/block/drbd/drbd_bitmap.c | 24
-rw-r--r-- drivers/block/drbd/drbd_int.h | 108
-rw-r--r-- drivers/block/drbd/drbd_main.c | 183
-rw-r--r-- drivers/block/drbd/drbd_nl.c | 74
-rw-r--r-- drivers/block/drbd/drbd_proc.c | 14
-rw-r--r-- drivers/block/drbd/drbd_receiver.c | 147
-rw-r--r-- drivers/block/drbd/drbd_req.c | 43
-rw-r--r-- drivers/block/drbd/drbd_worker.c | 87
-rw-r--r-- drivers/block/loop.c | 21
-rw-r--r-- drivers/block/mtip32xx/mtip32xx.c | 57
-rw-r--r-- drivers/block/mtip32xx/mtip32xx.h | 13
-rw-r--r-- drivers/block/osdblk.c | 3
-rw-r--r-- drivers/block/pktcdvd.c | 52
-rw-r--r-- drivers/block/xen-blkback/common.h | 4
-rw-r--r-- drivers/block/xen-blkback/xenbus.c | 9
-rw-r--r-- drivers/char/raw.c | 2
-rw-r--r-- drivers/gpio/Kconfig | 2
-rw-r--r-- drivers/gpio/gpio-em.c | 4
-rw-r--r-- drivers/gpio/gpio-rdc321x.c | 1
-rw-r--r-- drivers/gpio/gpiolib-of.c | 2
-rw-r--r-- drivers/hid/hid-logitech-dj.c | 4
-rw-r--r-- drivers/md/dm-crypt.c | 16
-rw-r--r-- drivers/md/dm-io.c | 11
-rw-r--r-- drivers/md/dm.c | 74
-rw-r--r-- drivers/md/md.c | 44
-rw-r--r-- drivers/md/raid0.c | 1
-rw-r--r-- drivers/mmc/card/block.c | 26
-rw-r--r-- drivers/mmc/host/atmel-mci.c | 6
-rw-r--r-- drivers/mmc/host/bfin_sdh.c | 7
-rw-r--r-- drivers/mmc/host/dw_mmc.c | 85
-rw-r--r-- drivers/mmc/host/mxs-mmc.c | 14
-rw-r--r-- drivers/mmc/host/omap.c | 14
-rw-r--r-- drivers/mmc/host/sdhci-esdhc.h | 6
-rw-r--r-- drivers/mtd/ubi/vtbl.c | 4
-rw-r--r-- drivers/net/can/sja1000/sja1000_platform.c | 4
-rw-r--r-- drivers/net/can/softing/softing_fw.c | 7
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 3
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 18
-rw-r--r-- drivers/net/ethernet/cirrus/cs89x0.c | 10
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_cmds.c | 6
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_main.c | 2
-rw-r--r-- drivers/net/ethernet/freescale/gianfar.c | 2
-rw-r--r-- drivers/net/ethernet/intel/e1000e/e1000.h | 1
-rw-r--r-- drivers/net/ethernet/intel/e1000e/netdev.c | 48
-rw-r--r-- drivers/net/ethernet/sfc/ethtool.c | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/common.h | 5
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/descs.h | 6
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/descs_com.h | 5
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac100.h | 5
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac1000.h | 5
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h | 5
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/mmc.h | 5
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/mmc_core.c | 6
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac.h | 5
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h | 4
-rw-r--r-- drivers/net/ethernet/ti/davinci_mdio.c | 4
-rw-r--r-- drivers/net/fddi/skfp/pmf.c | 2
-rw-r--r-- drivers/net/usb/qmi_wwan.c | 4
-rw-r--r-- drivers/net/usb/usbnet.c | 2
-rw-r--r-- drivers/net/wireless/ath/ath5k/eeprom.c | 2
-rw-r--r-- drivers/net/wireless/ath/ath5k/eeprom.h | 1
-rw-r--r-- drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c | 3
-rw-r--r-- drivers/net/wireless/ipw2x00/ipw2100.c | 3
-rw-r--r-- drivers/net/wireless/iwlwifi/dvm/debugfs.c | 3
-rw-r--r-- drivers/net/wireless/iwlwifi/pcie/internal.h | 2
-rw-r--r-- drivers/net/wireless/iwlwifi/pcie/rx.c | 2
-rw-r--r-- drivers/net/wireless/iwlwifi/pcie/trans.c | 30
-rw-r--r-- drivers/net/xen-netfront.c | 39
-rw-r--r-- drivers/rtc/rtc-at91sam9.c | 22
-rw-r--r-- drivers/s390/block/dasd_eckd.c | 2
-rw-r--r-- drivers/s390/block/dasd_ioctl.c | 7
-rw-r--r-- drivers/target/target_core_iblock.c | 9
-rw-r--r-- drivers/video/auo_k190x.c | 2
-rw-r--r-- drivers/video/console/bitblit.c | 2
-rw-r--r-- drivers/video/console/fbcon.c | 2
-rw-r--r-- drivers/video/mb862xx/mb862xxfbdrv.c | 2
-rw-r--r-- drivers/video/omap2/dss/sdi.c | 14
-rw-r--r-- drivers/video/omap2/omapfb/omapfb-main.c | 2
-rw-r--r-- fs/bio-integrity.c | 44
-rw-r--r-- fs/bio.c | 215
-rw-r--r-- fs/block_dev.c | 69
-rw-r--r-- fs/cifs/cifssmb.c | 11
-rw-r--r-- fs/cifs/dir.c | 9
-rw-r--r-- fs/cifs/inode.c | 24
-rw-r--r-- fs/cifs/link.c | 2
-rw-r--r-- fs/cifs/smb2misc.c | 16
-rw-r--r-- fs/cifs/smb2pdu.h | 10
-rw-r--r-- fs/cifs/transport.c | 9
-rw-r--r-- fs/exofs/ore.c | 5
-rw-r--r-- include/linux/bio.h | 70
-rw-r--r-- include/linux/blk_types.h | 36
-rw-r--r-- include/linux/blkdev.h | 82
-rw-r--r-- include/linux/drbd.h | 4
-rw-r--r-- include/linux/drbd_nl.h | 1
-rw-r--r-- include/linux/fs.h | 6
-rw-r--r-- include/linux/kernel.h | 12
-rw-r--r-- include/linux/loop.h | 2
-rw-r--r-- include/linux/mmc/card.h | 1
-rw-r--r-- include/linux/pci_ids.h | 2
-rw-r--r-- include/linux/percpu-rwsem.h | 89
-rw-r--r-- include/linux/scatterlist.h | 1
-rw-r--r-- include/linux/time.h | 7
-rw-r--r-- include/net/netfilter/nf_conntrack_ecache.h | 1
-rw-r--r-- init/Kconfig | 1
-rw-r--r-- kernel/time/timekeeping.c | 10
-rw-r--r-- lib/scatterlist.c | 22
-rw-r--r-- mm/mempolicy.c | 2
-rw-r--r-- net/core/netpoll.c | 10
-rw-r--r-- net/ipv4/ipmr.c | 14
-rw-r--r-- net/ipv4/netfilter/nf_nat_sip.c | 5
-rw-r--r-- net/ipv4/route.c | 6
-rw-r--r-- net/ipv4/tcp_input.c | 15
-rw-r--r-- net/ipv6/esp6.c | 6
-rw-r--r-- net/l2tp/l2tp_core.c | 3
-rw-r--r-- net/l2tp/l2tp_core.h | 1
-rw-r--r-- net/mac80211/tx.c | 38
-rw-r--r-- net/netfilter/ipvs/ip_vs_ctl.c | 4
-rw-r--r-- net/netfilter/nf_conntrack_core.c | 16
-rw-r--r-- net/netfilter/nf_conntrack_netlink.c | 3
-rw-r--r-- net/netfilter/nfnetlink_log.c | 6
-rw-r--r-- net/netlink/af_netlink.c | 4
-rw-r--r-- net/packet/af_packet.c | 2
-rw-r--r-- net/socket.c | 4
-rw-r--r-- net/xfrm/xfrm_state.c | 4
-rw-r--r-- scripts/Makefile.fwinst | 2
-rw-r--r-- scripts/link-vmlinux.sh | 2
-rw-r--r-- sound/pci/hda/hda_codec.c | 10
-rw-r--r-- sound/pci/hda/hda_codec.h | 1
-rw-r--r-- sound/pci/hda/patch_sigmatel.c | 4
-rw-r--r-- sound/usb/card.c | 4
-rw-r--r-- sound/usb/endpoint.c | 24
-rw-r--r-- sound/usb/endpoint.h | 3
-rw-r--r-- sound/usb/pcm.c | 64
-rw-r--r-- virt/kvm/kvm_main.c | 7
192 files changed, 1967 insertions, 1134 deletions
diff --git a/Documentation/ABI/testing/sysfs-block b/Documentation/ABI/testing/sysfs-block
index c1eb41cb9876..279da08f7541 100644
--- a/Documentation/ABI/testing/sysfs-block
+++ b/Documentation/ABI/testing/sysfs-block
@@ -206,3 +206,17 @@ Description:
when a discarded area is read the discard_zeroes_data
parameter will be set to one. Otherwise it will be 0 and
the result of reading a discarded area is undefined.
+
+What: /sys/block/<disk>/queue/write_same_max_bytes
+Date: January 2012
+Contact: Martin K. Petersen <martin.petersen@oracle.com>
+Description:
+ Some devices support a write same operation in which a
+ single data block can be written to a range of several
+ contiguous blocks on storage. This can be used to wipe
+ areas on disk or to initialize drives in a RAID
+ configuration. write_same_max_bytes indicates how many
+ bytes can be written in a single write same command. If
+ write_same_max_bytes is 0, write same is not supported
+ by the device.
+
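
For illustration, a minimal userspace sketch (not part of the patch)
that reads the new attribute; the disk name "sda" is an assumption:

    /* Query write_same_max_bytes for a hypothetical disk "sda". */
    #include <stdio.h>

    int main(void)
    {
        unsigned long long max_bytes;
        FILE *f = fopen("/sys/block/sda/queue/write_same_max_bytes", "r");

        if (!f)
            return 1;
        if (fscanf(f, "%llu", &max_bytes) != 1) {
            fclose(f);
            return 1;
        }
        fclose(f);
        printf("WRITE SAME %ssupported, up to %llu bytes per command\n",
               max_bytes ? "" : "not ", max_bytes);
        return 0;
    }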
diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt
index e418dc0a7086..8df5e8e6dceb 100644
--- a/Documentation/block/biodoc.txt
+++ b/Documentation/block/biodoc.txt
@@ -465,7 +465,6 @@ struct bio {
bio_end_io_t *bi_end_io; /* bi_end_io (bio) */
atomic_t bi_cnt; /* pin count: free when it hits zero */
void *bi_private;
- bio_destructor_t *bi_destructor; /* bi_destructor (bio) */
};
With this multipage bio design:
@@ -647,10 +646,6 @@ for a non-clone bio. There are the 6 pools setup for different size biovecs,
so bio_alloc(gfp_mask, nr_iovecs) will allocate a vec_list of the
given size from these slabs.
-The bi_destructor() routine takes into account the possibility of the bio
-having originated from a different source (see later discussions on
-n/w to block transfers and kvec_cb)
-
The bio_get() routine may be used to hold an extra reference on a bio prior
to i/o submission, if the bio fields are likely to be accessed after the
i/o is issued (since the bio may otherwise get freed in case i/o completion
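
As an aside (not part of the patch), the bio_get() convention described
above is typically used as in the following sketch, modeled on callers
such as blkdev_issue_flush(); my_end_io and the completion are
hypothetical helpers:

    static DECLARE_COMPLETION(done);

    static void my_end_io(struct bio *bio, int err)
    {
        complete(&done);
    }

    static int issue_and_check(struct block_device *bdev)
    {
        struct bio *bio = bio_alloc(GFP_KERNEL, 0);
        int ret = 0;

        if (!bio)
            return -ENOMEM;
        bio->bi_bdev = bdev;
        bio->bi_end_io = my_end_io;

        bio_get(bio);                 /* extra ref: bio stays valid */
        submit_bio(WRITE_FLUSH, bio);
        wait_for_completion(&done);

        if (!bio_flagged(bio, BIO_UPTODATE))
            ret = -EIO;               /* safe: we still hold a reference */
        bio_put(bio);                 /* drop it; bio may be freed here */
        return ret;
    }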
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index afaff312bf41..f4d8c7105fcd 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -579,7 +579,7 @@ Why: KVM tracepoints provide mostly equivalent information in a much more
----------------------------
What: at91-mci driver ("CONFIG_MMC_AT91")
-When: 3.7
+When: 3.8
Why: There are two mci drivers: at91-mci and atmel-mci. The PDC support
was added to atmel-mci as a first step to support more chips.
Then at91-mci was kept only for old IP versions (on at91rm9200 and
diff --git a/Documentation/percpu-rw-semaphore.txt b/Documentation/percpu-rw-semaphore.txt
new file mode 100644
index 000000000000..7d3c82431909
--- /dev/null
+++ b/Documentation/percpu-rw-semaphore.txt
@@ -0,0 +1,27 @@
+Percpu rw semaphores
+--------------------
+
+The percpu rw semaphore is a new read-write semaphore design that is
+optimized for locking for reading.
+
+The problem with traditional read-write semaphores is that when multiple
+cores take the lock for reading, the cache line containing the semaphore
+is bouncing between L1 caches of the cores, causing performance
+degradation.
+
+Locking for reading is very fast: it uses RCU and avoids any atomic
+instruction in the lock and unlock path. On the other hand, locking for
+writing is very expensive: it calls synchronize_rcu(), which can take
+hundreds of milliseconds.
+
+The lock is declared with the "struct percpu_rw_semaphore" type.
+The lock is initialized with percpu_init_rwsem, which returns 0 on
+success and -ENOMEM on allocation failure.
+The lock must be freed with percpu_free_rwsem to avoid a memory leak.
+
+The lock is taken for reading with percpu_down_read and released with
+percpu_up_read; for writing, use percpu_down_write and percpu_up_write.
+
+The idea of using RCU for an optimized rw-lock was introduced by
+Eric Dumazet <eric.dumazet@gmail.com>.
+The code was written by Mikulas Patocka <mpatocka@redhat.com>.
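
For illustration, a minimal in-kernel usage sketch of the API described
above (not part of the patch; the frobnicate_lock name and the
read/write-side bodies are hypothetical):

    #include <linux/percpu-rwsem.h>

    static struct percpu_rw_semaphore frobnicate_lock;

    static int __init frob_init(void)
    {
        /* returns 0 on success, -ENOMEM on allocation failure */
        return percpu_init_rwsem(&frobnicate_lock);
    }

    static void frob_read_side(void)
    {
        percpu_down_read(&frobnicate_lock);   /* fast: RCU, no atomics */
        /* ... read shared state ... */
        percpu_up_read(&frobnicate_lock);
    }

    static void frob_write_side(void)
    {
        percpu_down_write(&frobnicate_lock);  /* slow: synchronize_rcu() */
        /* ... modify shared state ... */
        percpu_up_write(&frobnicate_lock);
    }

    static void __exit frob_exit(void)
    {
        percpu_free_rwsem(&frobnicate_lock);  /* required to avoid a leak */
    }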
diff --git a/Makefile b/Makefile
index 354026873b13..371ce8899f5c 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 6
SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
NAME = Saber-toothed Squirrel
# *DOCUMENTATION*
diff --git a/arch/arm/boot/dts/at91sam9g25ek.dts b/arch/arm/boot/dts/at91sam9g25ek.dts
index 7829a4d0cb22..96514c134e54 100644
--- a/arch/arm/boot/dts/at91sam9g25ek.dts
+++ b/arch/arm/boot/dts/at91sam9g25ek.dts
@@ -15,7 +15,7 @@
compatible = "atmel,at91sam9g25ek", "atmel,at91sam9x5ek", "atmel,at91sam9x5", "atmel,at91sam9";
chosen {
- bootargs = "128M console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=ubifs ubi.mtd=1 root=ubi0:rootfs";
+ bootargs = "console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=ubifs ubi.mtd=1 root=ubi0:rootfs";
};
ahb {
diff --git a/arch/arm/configs/armadillo800eva_defconfig b/arch/arm/configs/armadillo800eva_defconfig
index 7d8718468e0d..90610c7030f7 100644
--- a/arch/arm/configs/armadillo800eva_defconfig
+++ b/arch/arm/configs/armadillo800eva_defconfig
@@ -33,7 +33,7 @@ CONFIG_AEABI=y
CONFIG_FORCE_MAX_ZONEORDER=13
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="console=tty0 console=ttySC1,115200 earlyprintk=sh-sci.1,115200 ignore_loglevel root=/dev/nfs ip=dhcp nfsroot=,rsize=4096,wsize=4096"
+CONFIG_CMDLINE="console=tty0 console=ttySC1,115200 earlyprintk=sh-sci.1,115200 ignore_loglevel root=/dev/nfs ip=dhcp nfsroot=,rsize=4096,wsize=4096 rw"
CONFIG_CMDLINE_FORCE=y
CONFIG_KEXEC=y
CONFIG_VFP=y
diff --git a/arch/arm/mach-at91/at91rm9200_time.c b/arch/arm/mach-at91/at91rm9200_time.c
index 104ca40d8d18..aaa443b48c91 100644
--- a/arch/arm/mach-at91/at91rm9200_time.c
+++ b/arch/arm/mach-at91/at91rm9200_time.c
@@ -197,7 +197,7 @@ void __init at91rm9200_timer_init(void)
at91_st_read(AT91_ST_SR);
/* Make IRQs happen for the system timer */
- setup_irq(AT91_ID_SYS, &at91rm9200_timer_irq);
+ setup_irq(NR_IRQS_LEGACY + AT91_ID_SYS, &at91rm9200_timer_irq);
/* The 32KiHz "Slow Clock" (tick every 30517.58 nanoseconds) is used
* directly for the clocksource and all clockevents, after adjusting
diff --git a/arch/arm/mach-at91/at91sam9260_devices.c b/arch/arm/mach-at91/at91sam9260_devices.c
index 7b9c2ba396ed..bce572a530ef 100644
--- a/arch/arm/mach-at91/at91sam9260_devices.c
+++ b/arch/arm/mach-at91/at91sam9260_devices.c
@@ -726,6 +726,8 @@ static struct resource rtt_resources[] = {
.flags = IORESOURCE_MEM,
}, {
.flags = IORESOURCE_MEM,
+ }, {
+ .flags = IORESOURCE_IRQ,
},
};
@@ -744,10 +746,12 @@ static void __init at91_add_device_rtt_rtc(void)
* The second resource is needed:
* GPBR will serve as the storage for RTC time offset
*/
- at91sam9260_rtt_device.num_resources = 2;
+ at91sam9260_rtt_device.num_resources = 3;
rtt_resources[1].start = AT91SAM9260_BASE_GPBR +
4 * CONFIG_RTC_DRV_AT91SAM9_GPBR;
rtt_resources[1].end = rtt_resources[1].start + 3;
+ rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS;
+ rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS;
}
#else
static void __init at91_add_device_rtt_rtc(void)
diff --git a/arch/arm/mach-at91/at91sam9261_devices.c b/arch/arm/mach-at91/at91sam9261_devices.c
index 8df5c1bdff92..bc2590d712d0 100644
--- a/arch/arm/mach-at91/at91sam9261_devices.c
+++ b/arch/arm/mach-at91/at91sam9261_devices.c
@@ -609,6 +609,8 @@ static struct resource rtt_resources[] = {
.flags = IORESOURCE_MEM,
}, {
.flags = IORESOURCE_MEM,
+ }, {
+ .flags = IORESOURCE_IRQ,
}
};
@@ -626,10 +628,12 @@ static void __init at91_add_device_rtt_rtc(void)
* The second resource is needed:
* GPBR will serve as the storage for RTC time offset
*/
- at91sam9261_rtt_device.num_resources = 2;
+ at91sam9261_rtt_device.num_resources = 3;
rtt_resources[1].start = AT91SAM9261_BASE_GPBR +
4 * CONFIG_RTC_DRV_AT91SAM9_GPBR;
rtt_resources[1].end = rtt_resources[1].start + 3;
+ rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS;
+ rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS;
}
#else
static void __init at91_add_device_rtt_rtc(void)
diff --git a/arch/arm/mach-at91/at91sam9263_devices.c b/arch/arm/mach-at91/at91sam9263_devices.c
index eb6bbf86fb9f..9b6ca734f1a9 100644
--- a/arch/arm/mach-at91/at91sam9263_devices.c
+++ b/arch/arm/mach-at91/at91sam9263_devices.c
@@ -990,6 +990,8 @@ static struct resource rtt0_resources[] = {
.flags = IORESOURCE_MEM,
}, {
.flags = IORESOURCE_MEM,
+ }, {
+ .flags = IORESOURCE_IRQ,
}
};
@@ -1006,6 +1008,8 @@ static struct resource rtt1_resources[] = {
.flags = IORESOURCE_MEM,
}, {
.flags = IORESOURCE_MEM,
+ }, {
+ .flags = IORESOURCE_IRQ,
}
};
@@ -1027,14 +1031,14 @@ static void __init at91_add_device_rtt_rtc(void)
* The second resource is needed only for the chosen RTT:
* GPBR will serve as the storage for RTC time offset
*/
- at91sam9263_rtt0_device.num_resources = 2;
+ at91sam9263_rtt0_device.num_resources = 3;
at91sam9263_rtt1_device.num_resources = 1;
pdev = &at91sam9263_rtt0_device;
r = rtt0_resources;
break;
case 1:
at91sam9263_rtt0_device.num_resources = 1;
- at91sam9263_rtt1_device.num_resources = 2;
+ at91sam9263_rtt1_device.num_resources = 3;
pdev = &at91sam9263_rtt1_device;
r = rtt1_resources;
break;
@@ -1047,6 +1051,8 @@ static void __init at91_add_device_rtt_rtc(void)
pdev->name = "rtc-at91sam9";
r[1].start = AT91SAM9263_BASE_GPBR + 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR;
r[1].end = r[1].start + 3;
+ r[2].start = NR_IRQS_LEGACY + AT91_ID_SYS;
+ r[2].end = NR_IRQS_LEGACY + AT91_ID_SYS;
}
#else
static void __init at91_add_device_rtt_rtc(void)
diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c
index 06073996a382..1b47319ca00b 100644
--- a/arch/arm/mach-at91/at91sam9g45_devices.c
+++ b/arch/arm/mach-at91/at91sam9g45_devices.c
@@ -1293,6 +1293,8 @@ static struct resource rtt_resources[] = {
.flags = IORESOURCE_MEM,
}, {
.flags = IORESOURCE_MEM,
+ }, {
+ .flags = IORESOURCE_IRQ,
}
};
@@ -1310,10 +1312,12 @@ static void __init at91_add_device_rtt_rtc(void)
* The second resource is needed:
* GPBR will serve as the storage for RTC time offset
*/
- at91sam9g45_rtt_device.num_resources = 2;
+ at91sam9g45_rtt_device.num_resources = 3;
rtt_resources[1].start = AT91SAM9G45_BASE_GPBR +
4 * CONFIG_RTC_DRV_AT91SAM9_GPBR;
rtt_resources[1].end = rtt_resources[1].start + 3;
+ rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS;
+ rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS;
}
#else
static void __init at91_add_device_rtt_rtc(void)
diff --git a/arch/arm/mach-at91/at91sam9rl_devices.c b/arch/arm/mach-at91/at91sam9rl_devices.c
index f09fff932172..b3d365dadef5 100644
--- a/arch/arm/mach-at91/at91sam9rl_devices.c
+++ b/arch/arm/mach-at91/at91sam9rl_devices.c
@@ -688,6 +688,8 @@ static struct resource rtt_resources[] = {
.flags = IORESOURCE_MEM,
}, {
.flags = IORESOURCE_MEM,
+ }, {
+ .flags = IORESOURCE_IRQ,
}
};
@@ -705,10 +707,12 @@ static void __init at91_add_device_rtt_rtc(void)
* The second resource is needed:
* GPBR will serve as the storage for RTC time offset
*/
- at91sam9rl_rtt_device.num_resources = 2;
+ at91sam9rl_rtt_device.num_resources = 3;
rtt_resources[1].start = AT91SAM9RL_BASE_GPBR +
4 * CONFIG_RTC_DRV_AT91SAM9_GPBR;
rtt_resources[1].end = rtt_resources[1].start + 3;
+ rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS;
+ rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS;
}
#else
static void __init at91_add_device_rtt_rtc(void)
diff --git a/arch/arm/mach-at91/clock.c b/arch/arm/mach-at91/clock.c
index de2ec6b8fea7..188c82971ebd 100644
--- a/arch/arm/mach-at91/clock.c
+++ b/arch/arm/mach-at91/clock.c
@@ -63,6 +63,12 @@ EXPORT_SYMBOL_GPL(at91_pmc_base);
#define cpu_has_300M_plla() (cpu_is_at91sam9g10())
+#define cpu_has_240M_plla() (cpu_is_at91sam9261() \
+ || cpu_is_at91sam9263() \
+ || cpu_is_at91sam9rl())
+
+#define cpu_has_210M_plla() (cpu_is_at91sam9260())
+
#define cpu_has_pllb() (!(cpu_is_at91sam9rl() \
|| cpu_is_at91sam9g45() \
|| cpu_is_at91sam9x5() \
@@ -706,6 +712,12 @@ static int __init at91_pmc_init(unsigned long main_clock)
} else if (cpu_has_800M_plla()) {
if (plla.rate_hz > 800000000)
pll_overclock = true;
+ } else if (cpu_has_240M_plla()) {
+ if (plla.rate_hz > 240000000)
+ pll_overclock = true;
+ } else if (cpu_has_210M_plla()) {
+ if (plla.rate_hz > 210000000)
+ pll_overclock = true;
} else {
if (plla.rate_hz > 209000000)
pll_overclock = true;
diff --git a/arch/arm/mach-gemini/irq.c b/arch/arm/mach-gemini/irq.c
index ca70e5fcc7ac..020852d3bdd8 100644
--- a/arch/arm/mach-gemini/irq.c
+++ b/arch/arm/mach-gemini/irq.c
@@ -17,6 +17,7 @@
#include <linux/sched.h>
#include <asm/irq.h>
#include <asm/mach/irq.h>
+#include <asm/system_misc.h>
#include <mach/hardware.h>
#define IRQ_SOURCE(base_addr) (base_addr + 0x00)
diff --git a/arch/arm/mach-kirkwood/db88f6281-bp-setup.c b/arch/arm/mach-kirkwood/db88f6281-bp-setup.c
index d93359379598..be90b7d0e10b 100644
--- a/arch/arm/mach-kirkwood/db88f6281-bp-setup.c
+++ b/arch/arm/mach-kirkwood/db88f6281-bp-setup.c
@@ -10,6 +10,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/sizes.h>
#include <linux/platform_device.h>
#include <linux/mtd/partitions.h>
#include <linux/ata_platform.h>
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c
index cf10f92856dc..453a6e50db8b 100644
--- a/arch/arm/mach-shmobile/board-armadillo800eva.c
+++ b/arch/arm/mach-shmobile/board-armadillo800eva.c
@@ -520,13 +520,14 @@ static struct platform_device hdmi_lcdc_device = {
};
/* GPIO KEY */
-#define GPIO_KEY(c, g, d) { .code = c, .gpio = g, .desc = d, .active_low = 1 }
+#define GPIO_KEY(c, g, d, ...) \
+ { .code = c, .gpio = g, .desc = d, .active_low = 1, __VA_ARGS__ }
static struct gpio_keys_button gpio_buttons[] = {
- GPIO_KEY(KEY_POWER, GPIO_PORT99, "SW1"),
- GPIO_KEY(KEY_BACK, GPIO_PORT100, "SW2"),
- GPIO_KEY(KEY_MENU, GPIO_PORT97, "SW3"),
- GPIO_KEY(KEY_HOME, GPIO_PORT98, "SW4"),
+ GPIO_KEY(KEY_POWER, GPIO_PORT99, "SW3", .wakeup = 1),
+ GPIO_KEY(KEY_BACK, GPIO_PORT100, "SW4"),
+ GPIO_KEY(KEY_MENU, GPIO_PORT97, "SW5"),
+ GPIO_KEY(KEY_HOME, GPIO_PORT98, "SW6"),
};
static struct gpio_keys_platform_data gpio_key_info = {
@@ -901,8 +902,8 @@ static struct platform_device *eva_devices[] __initdata = {
&camera_device,
&ceu0_device,
&fsi_device,
- &fsi_hdmi_device,
&fsi_wm8978_device,
+ &fsi_hdmi_device,
};
static void __init eva_clock_init(void)
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index 7ea2b31e3199..c129542f6aed 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -695,6 +695,7 @@ static struct platform_device usbhs0_device = {
* - J30 "open"
* - modify usbhs1_get_id() USBHS_HOST -> USBHS_GADGET
* - add .get_vbus = usbhs_get_vbus in usbhs1_private
+ * - check usbhs0_device(pio)/usbhs1_device(irq) order in mackerel_devices.
*/
#define IRQ8 evt2irq(0x0300)
#define USB_PHY_MODE (1 << 4)
@@ -1325,8 +1326,8 @@ static struct platform_device *mackerel_devices[] __initdata = {
&nor_flash_device,
&smc911x_device,
&lcdc_device,
- &usbhs1_device,
&usbhs0_device,
+ &usbhs1_device,
&leds_device,
&fsi_device,
&fsi_ak4643_device,
diff --git a/arch/arm/mach-shmobile/board-marzen.c b/arch/arm/mach-shmobile/board-marzen.c
index 3a528cf4366c..fcf5a47f4772 100644
--- a/arch/arm/mach-shmobile/board-marzen.c
+++ b/arch/arm/mach-shmobile/board-marzen.c
@@ -67,7 +67,7 @@ static struct smsc911x_platform_config smsc911x_platdata = {
static struct platform_device eth_device = {
.name = "smsc911x",
- .id = 0,
+ .id = -1,
.dev = {
.platform_data = &smsc911x_platdata,
},
diff --git a/arch/arm/mach-shmobile/intc-sh73a0.c b/arch/arm/mach-shmobile/intc-sh73a0.c
index ee447404c857..588555a67d9c 100644
--- a/arch/arm/mach-shmobile/intc-sh73a0.c
+++ b/arch/arm/mach-shmobile/intc-sh73a0.c
@@ -259,9 +259,9 @@ static int sh73a0_set_wake(struct irq_data *data, unsigned int on)
return 0; /* always allow wakeup */
}
-#define RELOC_BASE 0x1000
+#define RELOC_BASE 0x1200
-/* INTCA IRQ pins at INTCS + 0x1000 to make space for GIC+INTC handling */
+/* INTCA IRQ pins at INTCS + RELOC_BASE to make space for GIC+INTC handling */
#define INTCS_VECT_RELOC(n, vect) INTCS_VECT((n), (vect) + RELOC_BASE)
INTC_IRQ_PINS_32(intca_irq_pins, 0xe6900000,
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 6c6defc24619..af9cf30ed474 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -141,7 +141,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
#define atomic_sub_and_test(i,v) (atomic_sub_return((i),(v)) == 0)
-#define ATOMIC_INIT(i) ((atomic_t) { (i) })
+#define ATOMIC_INIT(i) { (i) }
#define smp_mb__before_atomic_dec() smp_mb()
#define smp_mb__after_atomic_dec() smp_mb()
@@ -150,7 +150,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
#ifdef CONFIG_64BIT
-#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })
+#define ATOMIC64_INIT(i) { (i) }
static __inline__ s64
__atomic64_add_return(s64 i, atomic64_t *v)
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index d4b94b395c16..2c05a9292a81 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -309,7 +309,7 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
cregs->ksp = (unsigned long)stack
+ (pregs->gr[21] & (THREAD_SIZE - 1));
cregs->gr[30] = usp;
- if (p->personality == PER_HPUX) {
+ if (personality(p->personality) == PER_HPUX) {
#ifdef CONFIG_HPUX
cregs->kpc = (unsigned long) &hpux_child_return;
#else
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index c9b932260f47..7426e40699bd 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -225,12 +225,12 @@ long parisc_personality(unsigned long personality)
long err;
if (personality(current->personality) == PER_LINUX32
- && personality == PER_LINUX)
- personality = PER_LINUX32;
+ && personality(personality) == PER_LINUX)
+ personality = (personality & ~PER_MASK) | PER_LINUX32;
err = sys_personality(personality);
- if (err == PER_LINUX32)
- err = PER_LINUX;
+ if (personality(err) == PER_LINUX32)
+ err = (err & ~PER_MASK) | PER_LINUX;
return err;
}
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 53b6dfa83344..54b73a28c205 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -386,6 +386,7 @@ extern unsigned long cpuidle_disable;
enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF};
extern int powersave_nap; /* set if nap mode can be used in idle loop */
+extern void power7_nap(void);
#ifdef CONFIG_PSERIES_IDLE
extern void update_smt_snooze_delay(int snooze);
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 85b05c463fae..e8995727b1c1 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -76,6 +76,7 @@ int main(void)
DEFINE(SIGSEGV, SIGSEGV);
DEFINE(NMI_MASK, NMI_MASK);
DEFINE(THREAD_DSCR, offsetof(struct thread_struct, dscr));
+ DEFINE(THREAD_DSCR_INHERIT, offsetof(struct thread_struct, dscr_inherit));
#else
DEFINE(THREAD_INFO, offsetof(struct task_struct, stack));
#endif /* CONFIG_PPC64 */
diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c
index 5b25c8060fd6..a892680668d8 100644
--- a/arch/powerpc/kernel/dbell.c
+++ b/arch/powerpc/kernel/dbell.c
@@ -28,6 +28,8 @@ void doorbell_setup_this_cpu(void)
void doorbell_cause_ipi(int cpu, unsigned long data)
{
+ /* Order previous accesses vs. msgsnd, which is treated as a store */
+ mb();
ppc_msgsnd(PPC_DBELL, 0, data);
}
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 4b01a25e29ef..b40e0b4815b3 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -370,6 +370,12 @@ _GLOBAL(ret_from_fork)
li r3,0
b syscall_exit
+ .section ".toc","aw"
+DSCR_DEFAULT:
+ .tc dscr_default[TC],dscr_default
+
+ .section ".text"
+
/*
* This routine switches between two different tasks. The process
* state of one is saved on its kernel stack. Then the state
@@ -509,9 +515,6 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
mr r1,r8 /* start using new stack pointer */
std r7,PACAKSAVE(r13)
- ld r6,_CCR(r1)
- mtcrf 0xFF,r6
-
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
ld r0,THREAD_VRSAVE(r4)
@@ -520,14 +523,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
+ lwz r6,THREAD_DSCR_INHERIT(r4)
+ ld r7,DSCR_DEFAULT@toc(2)
ld r0,THREAD_DSCR(r4)
- cmpd r0,r25
- beq 1f
+ cmpwi r6,0
+ bne 1f
+ ld r0,0(r7)
+1: cmpd r0,r25
+ beq 2f
mtspr SPRN_DSCR,r0
-1:
+2:
END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
#endif
+ ld r6,_CCR(r1)
+ mtcrf 0xFF,r6
+
/* r3-r13 are destroyed -- Cort */
REST_8GPRS(14, r1)
REST_10GPRS(22, r1)
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index e894515e77bb..39aa97d3ff88 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -186,7 +186,7 @@ hardware_interrupt_hv:
KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)
MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer)
- MASKABLE_EXCEPTION_HV(0x980, 0x982, decrementer)
+ STD_EXCEPTION_HV(0x980, 0x982, hdecrementer)
STD_EXCEPTION_PSERIES(0xa00, 0xa00, trap_0a)
KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)
@@ -486,6 +486,7 @@ machine_check_common:
STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ)
STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, .timer_interrupt)
+ STD_EXCEPTION_COMMON(0x980, hdecrementer, .hdec_interrupt)
STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index 7140d838339e..e11863f4e595 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -28,7 +28,9 @@ _GLOBAL(power7_idle)
lwz r4,ADDROFF(powersave_nap)(r3)
cmpwi 0,r4,0
beqlr
+ /* fall through */
+_GLOBAL(power7_nap)
/* NAP is a state loss, we create a regs frame on the
* stack, fill it up with the state we care about and
* stick a pointer to it in PACAR1. We really only
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 710f400476de..1a1f2ddfb581 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -802,16 +802,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
#endif /* CONFIG_PPC_STD_MMU_64 */
#ifdef CONFIG_PPC64
if (cpu_has_feature(CPU_FTR_DSCR)) {
- if (current->thread.dscr_inherit) {
- p->thread.dscr_inherit = 1;
- p->thread.dscr = current->thread.dscr;
- } else if (0 != dscr_default) {
- p->thread.dscr_inherit = 1;
- p->thread.dscr = dscr_default;
- } else {
- p->thread.dscr_inherit = 0;
- p->thread.dscr = 0;
- }
+ p->thread.dscr_inherit = current->thread.dscr_inherit;
+ p->thread.dscr = current->thread.dscr;
}
#endif
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 0321007086f7..8d4214afc21d 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -198,8 +198,15 @@ void smp_muxed_ipi_message_pass(int cpu, int msg)
struct cpu_messages *info = &per_cpu(ipi_message, cpu);
char *message = (char *)&info->messages;
+ /*
+ * Order previous accesses before accesses in the IPI handler.
+ */
+ smp_mb();
message[msg] = 1;
- mb();
+ /*
+ * cause_ipi functions are required to include a full barrier
+ * before doing whatever causes the IPI.
+ */
smp_ops->cause_ipi(cpu, info->data);
}
@@ -211,7 +218,7 @@ irqreturn_t smp_ipi_demux(void)
mb(); /* order any irq clear */
do {
- all = xchg_local(&info->messages, 0);
+ all = xchg(&info->messages, 0);
#ifdef __BIG_ENDIAN
if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNCTION)))
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 3529446c2abd..8302af649219 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -194,6 +194,14 @@ static ssize_t show_dscr_default(struct device *dev,
return sprintf(buf, "%lx\n", dscr_default);
}
+static void update_dscr(void *dummy)
+{
+ if (!current->thread.dscr_inherit) {
+ current->thread.dscr = dscr_default;
+ mtspr(SPRN_DSCR, dscr_default);
+ }
+}
+
static ssize_t __used store_dscr_default(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
@@ -206,6 +214,8 @@ static ssize_t __used store_dscr_default(struct device *dev,
return -EINVAL;
dscr_default = val;
+ on_each_cpu(update_dscr, NULL, 1);
+
return count;
}
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index be171ee73bf8..e49e93191b69 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -535,6 +535,15 @@ void timer_interrupt(struct pt_regs * regs)
trace_timer_interrupt_exit(regs);
}
+/*
+ * Hypervisor decrementer interrupts shouldn't occur but are sometimes
+ * left pending on exit from a KVM guest. We don't need to do anything
+ * to clear them, as they are edge-triggered.
+ */
+void hdec_interrupt(struct pt_regs *regs)
+{
+}
+
#ifdef CONFIG_SUSPEND
static void generic_suspend_disable_irqs(void)
{
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 158972341a2d..ae0843fa7a61 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -972,8 +972,9 @@ static int emulate_instruction(struct pt_regs *regs)
cpu_has_feature(CPU_FTR_DSCR)) {
PPC_WARN_EMULATED(mtdscr, regs);
rd = (instword >> 21) & 0x1f;
- mtspr(SPRN_DSCR, regs->gpr[rd]);
+ current->thread.dscr = regs->gpr[rd];
current->thread.dscr_inherit = 1;
+ mtspr(SPRN_DSCR, current->thread.dscr);
return 0;
}
#endif
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index dd223b3eb333..17e5b2364312 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -20,7 +20,7 @@ int patch_instruction(unsigned int *addr, unsigned int instr)
{
int err;
- err = __put_user(instr, addr);
+ __put_user_size(instr, addr, 4, err);
if (err)
return err;
asm ("dcbst 0, %0; sync; icbi 0,%0; sync; isync" : : "r" (addr));
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 39b159751c35..59213cfaeca9 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1436,11 +1436,11 @@ static long vphn_get_associativity(unsigned long cpu,
/*
* Update the node maps and sysfs entries for each cpu whose home node
- * has changed.
+ * has changed. Returns 1 when the topology has changed, and 0 otherwise.
*/
int arch_update_cpu_topology(void)
{
- int cpu, nid, old_nid;
+ int cpu, nid, old_nid, changed = 0;
unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
struct device *dev;
@@ -1466,9 +1466,10 @@ int arch_update_cpu_topology(void)
dev = get_cpu_device(cpu);
if (dev)
kobject_uevent(&dev->kobj, KOBJ_CHANGE);
+ changed = 1;
}
- return 1;
+ return changed;
}
static void topology_work_fn(struct work_struct *work)
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index 3ef46254c35b..7698b6e13c57 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -106,14 +106,6 @@ static void pnv_smp_cpu_kill_self(void)
{
unsigned int cpu;
- /* If powersave_nap is enabled, use NAP mode, else just
- * spin aimlessly
- */
- if (!powersave_nap) {
- generic_mach_cpu_die();
- return;
- }
-
/* Standard hot unplug procedure */
local_irq_disable();
idle_task_exit();
@@ -128,7 +120,7 @@ static void pnv_smp_cpu_kill_self(void)
*/
mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
while (!generic_check_cpu_restart(cpu)) {
- power7_idle();
+ power7_nap();
if (!generic_check_cpu_restart(cpu)) {
DBG("CPU%d Unexpected exit while offline !\n", cpu);
/* We may be getting an IPI, so we re-enable
diff --git a/arch/powerpc/sysdev/xics/icp-hv.c b/arch/powerpc/sysdev/xics/icp-hv.c
index 14469cf9df68..df0fc5821469 100644
--- a/arch/powerpc/sysdev/xics/icp-hv.c
+++ b/arch/powerpc/sysdev/xics/icp-hv.c
@@ -65,7 +65,11 @@ static inline void icp_hv_set_xirr(unsigned int value)
static inline void icp_hv_set_qirr(int n_cpu , u8 value)
{
int hw_cpu = get_hard_smp_processor_id(n_cpu);
- long rc = plpar_hcall_norets(H_IPI, hw_cpu, value);
+ long rc;
+
+	/* Make sure all previous accesses are ordered before the IPI is sent */
+ mb();
+ rc = plpar_hcall_norets(H_IPI, hw_cpu, value);
if (rc != H_SUCCESS) {
pr_err("%s: bad return code qirr cpu=%d hw_cpu=%d mfrr=0x%x "
"returned %ld\n", __func__, n_cpu, hw_cpu, value, rc);
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 32e8449640fa..9b94a160fe7f 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -180,7 +180,8 @@ extern char elf_platform[];
#define ELF_PLATFORM (elf_platform)
#ifndef CONFIG_64BIT
-#define SET_PERSONALITY(ex) set_personality(PER_LINUX)
+#define SET_PERSONALITY(ex) \
+ set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
#else /* CONFIG_64BIT */
#define SET_PERSONALITY(ex) \
do { \
diff --git a/arch/s390/include/asm/posix_types.h b/arch/s390/include/asm/posix_types.h
index 7bcc14e395f0..bf2a2ad2f800 100644
--- a/arch/s390/include/asm/posix_types.h
+++ b/arch/s390/include/asm/posix_types.h
@@ -13,6 +13,7 @@
*/
typedef unsigned long __kernel_size_t;
+typedef long __kernel_ssize_t;
#define __kernel_size_t __kernel_size_t
typedef unsigned short __kernel_old_dev_t;
@@ -25,7 +26,6 @@ typedef unsigned short __kernel_mode_t;
typedef unsigned short __kernel_ipc_pid_t;
typedef unsigned short __kernel_uid_t;
typedef unsigned short __kernel_gid_t;
-typedef int __kernel_ssize_t;
typedef int __kernel_ptrdiff_t;
#else /* __s390x__ */
@@ -35,7 +35,6 @@ typedef unsigned int __kernel_mode_t;
typedef int __kernel_ipc_pid_t;
typedef unsigned int __kernel_uid_t;
typedef unsigned int __kernel_gid_t;
-typedef long __kernel_ssize_t;
typedef long __kernel_ptrdiff_t;
typedef unsigned long __kernel_sigset_t; /* at least 32 bits */
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index a0a8340daafa..ce26ac3cb162 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -44,6 +44,7 @@ static inline void smp_call_online_cpu(void (*func)(void *), void *data)
}
static inline int smp_find_processor_id(int address) { return 0; }
+static inline int smp_store_status(int cpu) { return 0; }
static inline int smp_vcpu_scheduled(int cpu) { return 1; }
static inline void smp_yield_cpu(int cpu) { }
static inline void smp_yield(void) { }
diff --git a/arch/um/os-Linux/time.c b/arch/um/os-Linux/time.c
index f60238559af3..0748fe0c8a73 100644
--- a/arch/um/os-Linux/time.c
+++ b/arch/um/os-Linux/time.c
@@ -114,7 +114,7 @@ static void deliver_alarm(void)
skew += this_tick - last_tick;
while (skew >= one_tick) {
- alarm_handler(SIGVTALRM, NULL);
+ alarm_handler(SIGVTALRM, NULL, NULL);
skew -= one_tick;
}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index dce75b760312..148ed666e311 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2000,6 +2000,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
case MSR_KVM_STEAL_TIME:
data = vcpu->arch.st.msr_val;
break;
+ case MSR_KVM_PV_EOI_EN:
+ data = vcpu->arch.pv_eoi.msr_val;
+ break;
case MSR_IA32_P5_MC_ADDR:
case MSR_IA32_P5_MC_TYPE:
case MSR_IA32_MCG_CAP:
diff --git a/block/blk-core.c b/block/blk-core.c
index 4b4dbdfbca89..a17869f337f7 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -608,8 +608,8 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
/*
* A queue starts its life with bypass turned on to avoid
* unnecessary bypass on/off overhead and nasty surprises during
- * init. The initial bypass will be finished at the end of
- * blk_init_allocated_queue().
+ * init. The initial bypass will be finished when the queue is
+ * registered by blk_register_queue().
*/
q->bypass_depth = 1;
__set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
@@ -696,7 +696,7 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
q->request_fn = rfn;
q->prep_rq_fn = NULL;
q->unprep_rq_fn = NULL;
- q->queue_flags = QUEUE_FLAG_DEFAULT;
+ q->queue_flags |= QUEUE_FLAG_DEFAULT;
/* Override internal queue lock with supplied lock pointer */
if (lock)
@@ -712,11 +712,6 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
/* init elevator */
if (elevator_init(q, NULL))
return NULL;
-
- blk_queue_congestion_threshold(q);
-
- /* all done, end the initial bypass */
- blk_queue_bypass_end(q);
return q;
}
EXPORT_SYMBOL(blk_init_allocated_queue);
@@ -1659,8 +1654,8 @@ generic_make_request_checks(struct bio *bio)
goto end_io;
}
- if (unlikely(!(bio->bi_rw & REQ_DISCARD) &&
- nr_sectors > queue_max_hw_sectors(q))) {
+ if (likely(bio_is_rw(bio) &&
+ nr_sectors > queue_max_hw_sectors(q))) {
printk(KERN_ERR "bio too big device %s (%u > %u)\n",
bdevname(bio->bi_bdev, b),
bio_sectors(bio),
@@ -1701,8 +1696,12 @@ generic_make_request_checks(struct bio *bio)
if ((bio->bi_rw & REQ_DISCARD) &&
(!blk_queue_discard(q) ||
- ((bio->bi_rw & REQ_SECURE) &&
- !blk_queue_secdiscard(q)))) {
+ ((bio->bi_rw & REQ_SECURE) && !blk_queue_secdiscard(q)))) {
+ err = -EOPNOTSUPP;
+ goto end_io;
+ }
+
+ if (bio->bi_rw & REQ_WRITE_SAME && !bdev_write_same(bio->bi_bdev)) {
err = -EOPNOTSUPP;
goto end_io;
}
@@ -1812,15 +1811,20 @@ EXPORT_SYMBOL(generic_make_request);
*/
void submit_bio(int rw, struct bio *bio)
{
- int count = bio_sectors(bio);
-
bio->bi_rw |= rw;
/*
* If it's a regular read/write or a barrier with data attached,
* go through the normal accounting stuff before submission.
*/
- if (bio_has_data(bio) && !(rw & REQ_DISCARD)) {
+ if (bio_has_data(bio)) {
+ unsigned int count;
+
+ if (unlikely(rw & REQ_WRITE_SAME))
+ count = bdev_logical_block_size(bio->bi_bdev) >> 9;
+ else
+ count = bio_sectors(bio);
+
if (rw & WRITE) {
count_vm_events(PGPGOUT, count);
} else {
@@ -1866,11 +1870,10 @@ EXPORT_SYMBOL(submit_bio);
*/
int blk_rq_check_limits(struct request_queue *q, struct request *rq)
{
- if (rq->cmd_flags & REQ_DISCARD)
+ if (!rq_mergeable(rq))
return 0;
- if (blk_rq_sectors(rq) > queue_max_sectors(q) ||
- blk_rq_bytes(rq) > queue_max_hw_sectors(q) << 9) {
+ if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, rq->cmd_flags)) {
printk(KERN_ERR "%s: over max size limit.\n", __func__);
return -EIO;
}
@@ -2340,7 +2343,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
req->buffer = bio_data(req->bio);
/* update sector only for requests with clear definition of sector */
- if (req->cmd_type == REQ_TYPE_FS || (req->cmd_flags & REQ_DISCARD))
+ if (req->cmd_type == REQ_TYPE_FS)
req->__sector += total_bytes >> 9;
/* mixed attributes always follow the first bio */
@@ -2781,16 +2784,10 @@ int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
blk_rq_init(NULL, rq);
__rq_for_each_bio(bio_src, rq_src) {
- bio = bio_alloc_bioset(gfp_mask, bio_src->bi_max_vecs, bs);
+ bio = bio_clone_bioset(bio_src, gfp_mask, bs);
if (!bio)
goto free_and_out;
- __bio_clone(bio, bio_src);
-
- if (bio_integrity(bio_src) &&
- bio_integrity_clone(bio, bio_src, gfp_mask, bs))
- goto free_and_out;
-
if (bio_ctr && bio_ctr(bio, bio_src, data))
goto free_and_out;
@@ -2807,7 +2804,7 @@ int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
free_and_out:
if (bio)
- bio_free(bio, bs);
+ bio_put(bio);
blk_rq_unprep_clone(rq);
return -ENOMEM;
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 19cc761cacb2..9373b58dfab1 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -130,6 +130,80 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
EXPORT_SYMBOL(blkdev_issue_discard);
/**
+ * blkdev_issue_write_same - queue a write same operation
+ * @bdev: target blockdev
+ * @sector: start sector
+ * @nr_sects: number of sectors to write
+ * @gfp_mask: memory allocation flags (for bio_alloc)
+ * @page: page containing data to write
+ *
+ * Description:
+ * Issue a write same request for the sectors in question.
+ */
+int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask,
+ struct page *page)
+{
+ DECLARE_COMPLETION_ONSTACK(wait);
+ struct request_queue *q = bdev_get_queue(bdev);
+ unsigned int max_write_same_sectors;
+ struct bio_batch bb;
+ struct bio *bio;
+ int ret = 0;
+
+ if (!q)
+ return -ENXIO;
+
+ max_write_same_sectors = q->limits.max_write_same_sectors;
+
+ if (max_write_same_sectors == 0)
+ return -EOPNOTSUPP;
+
+ atomic_set(&bb.done, 1);
+ bb.flags = 1 << BIO_UPTODATE;
+ bb.wait = &wait;
+
+ while (nr_sects) {
+ bio = bio_alloc(gfp_mask, 1);
+ if (!bio) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ bio->bi_sector = sector;
+ bio->bi_end_io = bio_batch_end_io;
+ bio->bi_bdev = bdev;
+ bio->bi_private = &bb;
+ bio->bi_vcnt = 1;
+ bio->bi_io_vec->bv_page = page;
+ bio->bi_io_vec->bv_offset = 0;
+ bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
+
+ if (nr_sects > max_write_same_sectors) {
+ bio->bi_size = max_write_same_sectors << 9;
+ nr_sects -= max_write_same_sectors;
+ sector += max_write_same_sectors;
+ } else {
+ bio->bi_size = nr_sects << 9;
+ nr_sects = 0;
+ }
+
+ atomic_inc(&bb.done);
+ submit_bio(REQ_WRITE | REQ_WRITE_SAME, bio);
+ }
+
+ /* Wait for bios in-flight */
+ if (!atomic_dec_and_test(&bb.done))
+ wait_for_completion(&wait);
+
+ if (!test_bit(BIO_UPTODATE, &bb.flags))
+ ret = -ENOTSUPP;
+
+ return ret;
+}
+EXPORT_SYMBOL(blkdev_issue_write_same);
+
+/**
* blkdev_issue_zeroout - generate number of zero filed write bios
* @bdev: blockdev to issue
* @sector: start sector
@@ -140,7 +214,7 @@ EXPORT_SYMBOL(blkdev_issue_discard);
* Generate and issue number of bios with zerofiled pages.
*/
-int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
+int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask)
{
int ret;
@@ -190,4 +264,32 @@ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
return ret;
}
+
+/**
+ * blkdev_issue_zeroout - zero-fill a block range
+ * @bdev: blockdev to write
+ * @sector: start sector
+ * @nr_sects: number of sectors to write
+ * @gfp_mask: memory allocation flags (for bio_alloc)
+ *
+ * Description:
+ * Zero-fill a block range, using WRITE SAME when the device supports it.
+ */
+
+int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask)
+{
+ if (bdev_write_same(bdev)) {
+ unsigned char bdn[BDEVNAME_SIZE];
+
+ if (!blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
+ ZERO_PAGE(0)))
+ return 0;
+
+ bdevname(bdev, bdn);
+ pr_err("%s: WRITE SAME failed. Manually zeroing.\n", bdn);
+ }
+
+ return __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask);
+}
EXPORT_SYMBOL(blkdev_issue_zeroout);
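
For illustration, a sketch (not part of the patch) of an in-kernel
caller using the new helper; zero_one_megabyte is a hypothetical name:

    /* Zero 1 MiB starting at "start". blkdev_issue_zeroout() issues a
     * WRITE SAME of the zero page when the device supports it and falls
     * back to ordinary zero-filled writes otherwise. */
    static int zero_one_megabyte(struct block_device *bdev, sector_t start)
    {
        sector_t nr_sects = (1024 * 1024) >> 9;   /* 2048 sectors */

        return blkdev_issue_zeroout(bdev, start, nr_sects, GFP_KERNEL);
    }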
diff --git a/block/blk-merge.c b/block/blk-merge.c
index e76279e41162..936a110de0b9 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -275,14 +275,8 @@ no_merge:
int ll_back_merge_fn(struct request_queue *q, struct request *req,
struct bio *bio)
{
- unsigned short max_sectors;
-
- if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC))
- max_sectors = queue_max_hw_sectors(q);
- else
- max_sectors = queue_max_sectors(q);
-
- if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
+ if (blk_rq_sectors(req) + bio_sectors(bio) >
+ blk_rq_get_max_sectors(req)) {
req->cmd_flags |= REQ_NOMERGE;
if (req == q->last_merge)
q->last_merge = NULL;
@@ -299,15 +293,8 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
int ll_front_merge_fn(struct request_queue *q, struct request *req,
struct bio *bio)
{
- unsigned short max_sectors;
-
- if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC))
- max_sectors = queue_max_hw_sectors(q);
- else
- max_sectors = queue_max_sectors(q);
-
-
- if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
+ if (blk_rq_sectors(req) + bio_sectors(bio) >
+ blk_rq_get_max_sectors(req)) {
req->cmd_flags |= REQ_NOMERGE;
if (req == q->last_merge)
q->last_merge = NULL;
@@ -338,7 +325,8 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
/*
* Will it become too large?
*/
- if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > queue_max_sectors(q))
+ if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
+ blk_rq_get_max_sectors(req))
return 0;
total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
@@ -417,16 +405,7 @@ static int attempt_merge(struct request_queue *q, struct request *req,
if (!rq_mergeable(req) || !rq_mergeable(next))
return 0;
- /*
- * Don't merge file system requests and discard requests
- */
- if ((req->cmd_flags & REQ_DISCARD) != (next->cmd_flags & REQ_DISCARD))
- return 0;
-
- /*
- * Don't merge discard requests and secure discard requests
- */
- if ((req->cmd_flags & REQ_SECURE) != (next->cmd_flags & REQ_SECURE))
+ if (!blk_check_merge_flags(req->cmd_flags, next->cmd_flags))
return 0;
/*
@@ -440,6 +419,10 @@ static int attempt_merge(struct request_queue *q, struct request *req,
|| next->special)
return 0;
+ if (req->cmd_flags & REQ_WRITE_SAME &&
+ !blk_write_same_mergeable(req->bio, next->bio))
+ return 0;
+
/*
* If we are allowed to merge, then append bio list
* from next to rq and release next. merge_requests_fn
@@ -521,15 +504,10 @@ int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
- if (!rq_mergeable(rq))
+ if (!rq_mergeable(rq) || !bio_mergeable(bio))
return false;
- /* don't merge file system requests and discard requests */
- if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD))
- return false;
-
- /* don't merge discard requests and secure discard requests */
- if ((bio->bi_rw & REQ_SECURE) != (rq->bio->bi_rw & REQ_SECURE))
+ if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw))
return false;
/* different data direction or already started, don't merge */
@@ -544,6 +522,11 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
if (bio_integrity(bio) != blk_integrity_rq(rq))
return false;
+ /* must be using the same buffer */
+ if (rq->cmd_flags & REQ_WRITE_SAME &&
+ !blk_write_same_mergeable(rq->bio, bio))
+ return false;
+
return true;
}
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 565a6786032f..779bb7646bcd 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -113,6 +113,7 @@ void blk_set_default_limits(struct queue_limits *lim)
lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
+ lim->max_write_same_sectors = 0;
lim->max_discard_sectors = 0;
lim->discard_granularity = 0;
lim->discard_alignment = 0;
@@ -144,6 +145,7 @@ void blk_set_stacking_limits(struct queue_limits *lim)
lim->max_segments = USHRT_MAX;
lim->max_hw_sectors = UINT_MAX;
lim->max_sectors = UINT_MAX;
+ lim->max_write_same_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);
@@ -286,6 +288,18 @@ void blk_queue_max_discard_sectors(struct request_queue *q,
EXPORT_SYMBOL(blk_queue_max_discard_sectors);
/**
+ * blk_queue_max_write_same_sectors - set max sectors for a single write same
+ * @q: the request queue for the device
+ * @max_write_same_sectors: maximum number of sectors to write per command
+ **/
+void blk_queue_max_write_same_sectors(struct request_queue *q,
+ unsigned int max_write_same_sectors)
+{
+ q->limits.max_write_same_sectors = max_write_same_sectors;
+}
+EXPORT_SYMBOL(blk_queue_max_write_same_sectors);
+
+/**
* blk_queue_max_segments - set max hw segments for a request for this queue
* @q: the request queue for the device
* @max_segments: max number of segments
@@ -510,6 +524,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
+ t->max_write_same_sectors = min(t->max_write_same_sectors,
+ b->max_write_same_sectors);
t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);
t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 9628b291f960..ce6204608822 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -26,9 +26,15 @@ queue_var_show(unsigned long var, char *page)
static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
- char *p = (char *) page;
+ int err;
+ unsigned long v;
+
+ err = strict_strtoul(page, 10, &v);
+ if (err || v > UINT_MAX)
+ return -EINVAL;
+
+ *var = v;
- *var = simple_strtoul(p, &p, 10);
return count;
}
@@ -48,6 +54,9 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
return -EINVAL;
ret = queue_var_store(&nr, page, count);
+ if (ret < 0)
+ return ret;
+
if (nr < BLKDEV_MIN_RQ)
nr = BLKDEV_MIN_RQ;
@@ -102,6 +111,9 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count)
unsigned long ra_kb;
ssize_t ret = queue_var_store(&ra_kb, page, count);
+ if (ret < 0)
+ return ret;
+
q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
return ret;
@@ -168,6 +180,13 @@ static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *pag
return queue_var_show(queue_discard_zeroes_data(q), page);
}
+static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
+{
+ return sprintf(page, "%llu\n",
+ (unsigned long long)q->limits.max_write_same_sectors << 9);
+}
+
+
static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
@@ -176,6 +195,9 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
+ if (ret < 0)
+ return ret;
+
if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
return -EINVAL;
@@ -236,6 +258,9 @@ static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
unsigned long nm;
ssize_t ret = queue_var_store(&nm, page, count);
+ if (ret < 0)
+ return ret;
+
spin_lock_irq(q->queue_lock);
queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
@@ -264,6 +289,9 @@ queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
unsigned long val;
ret = queue_var_store(&val, page, count);
+ if (ret < 0)
+ return ret;
+
spin_lock_irq(q->queue_lock);
if (val == 2) {
queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
@@ -364,6 +392,11 @@ static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
.show = queue_discard_zeroes_data_show,
};
+static struct queue_sysfs_entry queue_write_same_max_entry = {
+ .attr = {.name = "write_same_max_bytes", .mode = S_IRUGO },
+ .show = queue_write_same_max_show,
+};
+
static struct queue_sysfs_entry queue_nonrot_entry = {
.attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
.show = queue_show_nonrot,
@@ -411,6 +444,7 @@ static struct attribute *default_attrs[] = {
&queue_discard_granularity_entry.attr,
&queue_discard_max_entry.attr,
&queue_discard_zeroes_data_entry.attr,
+ &queue_write_same_max_entry.attr,
&queue_nonrot_entry.attr,
&queue_nomerges_entry.attr,
&queue_rq_affinity_entry.attr,
@@ -527,6 +561,12 @@ int blk_register_queue(struct gendisk *disk)
if (WARN_ON(!q))
return -ENXIO;
+ /*
+ * Initialization must be complete by now. Finish the initial
+ * bypass from queue allocation.
+ */
+ blk_queue_bypass_end(q);
+
ret = blk_trace_init_sysfs(dev);
if (ret)
return ret;
diff --git a/block/blk-tag.c b/block/blk-tag.c
index 4af6f5cc1167..cc345e1d8d4e 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -186,7 +186,8 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
tags = __blk_queue_init_tags(q, depth);
if (!tags)
- goto fail;
+ return -ENOMEM;
+
} else if (q->queue_tags) {
rc = blk_queue_resize_tags(q, depth);
if (rc)
@@ -203,9 +204,6 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
INIT_LIST_HEAD(&q->tag_busy_list);
return 0;
-fail:
- kfree(tags);
- return -ENOMEM;
}
EXPORT_SYMBOL(blk_queue_init_tags);
diff --git a/block/blk.h b/block/blk.h
index 2a0ea32d249f..ca51543b248c 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -171,14 +171,13 @@ static inline int queue_congestion_off_threshold(struct request_queue *q)
*
* a) it's attached to a gendisk, and
* b) the queue had IO stats enabled when this request was started, and
- * c) it's a file system request or a discard request
+ * c) it's a file system request
*/
static inline int blk_do_io_stat(struct request *rq)
{
return rq->rq_disk &&
(rq->cmd_flags & REQ_IO_STAT) &&
- (rq->cmd_type == REQ_TYPE_FS ||
- (rq->cmd_flags & REQ_DISCARD));
+ (rq->cmd_type == REQ_TYPE_FS);
}
/*
diff --git a/block/elevator.c b/block/elevator.c
index 6a55d418896f..9b1d42b62f20 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -562,8 +562,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
if (rq->cmd_flags & REQ_SOFTBARRIER) {
/* barriers are scheduling boundary, update end_sector */
- if (rq->cmd_type == REQ_TYPE_FS ||
- (rq->cmd_flags & REQ_DISCARD)) {
+ if (rq->cmd_type == REQ_TYPE_FS) {
q->end_sector = rq_end_sector(rq);
q->boundary_rq = rq;
}
@@ -605,8 +604,7 @@ void __elv_add_request(struct request_queue *q, struct request *rq, int where)
if (elv_attempt_insert_merge(q, rq))
break;
case ELEVATOR_INSERT_SORT:
- BUG_ON(rq->cmd_type != REQ_TYPE_FS &&
- !(rq->cmd_flags & REQ_DISCARD));
+ BUG_ON(rq->cmd_type != REQ_TYPE_FS);
rq->cmd_flags |= REQ_SORTED;
q->nr_sorted++;
if (rq_mergeable(rq)) {
diff --git a/block/ioctl.c b/block/ioctl.c
index 4476e0e85d16..769d2960c0a6 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -185,6 +185,22 @@ static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
return blkdev_issue_discard(bdev, start, len, GFP_KERNEL, flags);
}
+static int blk_ioctl_zeroout(struct block_device *bdev, uint64_t start,
+ uint64_t len)
+{
+ if (start & 511)
+ return -EINVAL;
+ if (len & 511)
+ return -EINVAL;
+ start >>= 9;
+ len >>= 9;
+
+ if (start + len > (i_size_read(bdev->bd_inode) >> 9))
+ return -EINVAL;
+
+ return blkdev_issue_zeroout(bdev, start, len, GFP_KERNEL);
+}
+
static int put_ushort(unsigned long arg, unsigned short val)
{
return put_user(val, (unsigned short __user *)arg);
@@ -300,6 +316,17 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
return blk_ioctl_discard(bdev, range[0], range[1],
cmd == BLKSECDISCARD);
}
+ case BLKZEROOUT: {
+ uint64_t range[2];
+
+ if (!(mode & FMODE_WRITE))
+ return -EBADF;
+
+ if (copy_from_user(range, (void __user *)arg, sizeof(range)))
+ return -EFAULT;
+
+ return blk_ioctl_zeroout(bdev, range[0], range[1]);
+ }
case HDIO_GETGEO: {
struct hd_geometry geo;
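For reference, the new BLKZEROOUT ioctl takes a {start, length} pair in bytes, both multiples of 512, on a block device opened for writing. A hypothetical userspace test (BLKZEROOUT comes from <linux/fs.h>):

#include <stdint.h>
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* BLKZEROOUT */

int main(int argc, char **argv)
{
	/* zero 1 MiB starting at byte offset 4096; unaligned values or a
	 * range beyond the end of the device get -EINVAL, see
	 * blk_ioctl_zeroout() above */
	uint64_t range[2] = { 4096, 1024 * 1024 };
	int fd;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_WRONLY);	/* FMODE_WRITE is required */
	if (fd < 0 || ioctl(fd, BLKZEROOUT, &range) < 0) {
		perror("BLKZEROOUT");
		return 1;
	}
	close(fd);
	return 0;
}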
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index a796407123c7..9522682ee8fb 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -131,6 +131,7 @@ config BLK_CPQ_DA
config BLK_CPQ_CISS_DA
tristate "Compaq Smart Array 5xxx support"
depends on PCI
+ select CHECK_SIGNATURE
help
This is the driver for Compaq Smart Array 5xxx controllers.
Everyone using these boards should say Y here.
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index b0f553b26d0f..ca83f96756ad 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -5205,7 +5205,6 @@ static void cciss_shutdown(struct pci_dev *pdev)
return;
}
/* write all data in the battery backed cache to disk */
- memset(flush_buf, 0, 4);
return_code = sendcmd_withirq(h, CCISS_CACHE_FLUSH, flush_buf,
4, 0, CTLR_LUNID, TYPE_CMD);
kfree(flush_buf);
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 3fbef018ce55..d4dd563d0d54 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -82,22 +82,19 @@ void drbd_md_put_buffer(struct drbd_conf *mdev)
wake_up(&mdev->misc_wait);
}
-static bool md_io_allowed(struct drbd_conf *mdev)
-{
- enum drbd_disk_state ds = mdev->state.disk;
- return ds >= D_NEGOTIATING || ds == D_ATTACHING;
-}
-
-void wait_until_done_or_disk_failure(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
+void wait_until_done_or_force_detached(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
unsigned int *done)
{
long dt = bdev->dc.disk_timeout * HZ / 10;
if (dt == 0)
dt = MAX_SCHEDULE_TIMEOUT;
- dt = wait_event_timeout(mdev->misc_wait, *done || !md_io_allowed(mdev), dt);
- if (dt == 0)
+ dt = wait_event_timeout(mdev->misc_wait,
+ *done || drbd_test_flag(mdev, FORCE_DETACH), dt);
+ if (dt == 0) {
dev_err(DEV, "meta-data IO operation timed out\n");
+ drbd_chk_io_error(mdev, 1, DRBD_FORCE_DETACH);
+ }
}
static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
@@ -111,7 +108,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
mdev->md_io.done = 0;
mdev->md_io.error = -ENODEV;
- if ((rw & WRITE) && !test_bit(MD_NO_FUA, &mdev->flags))
+ if ((rw & WRITE) && !drbd_test_flag(mdev, MD_NO_FUA))
rw |= REQ_FUA | REQ_FLUSH;
rw |= REQ_SYNC;
@@ -137,7 +134,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
bio_endio(bio, -EIO);
else
submit_bio(rw, bio);
- wait_until_done_or_disk_failure(mdev, bdev, &mdev->md_io.done);
+ wait_until_done_or_force_detached(mdev, bdev, &mdev->md_io.done);
ok = bio_flagged(bio, BIO_UPTODATE) && mdev->md_io.error == 0;
out:
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index d84566496746..8d8069758042 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -373,14 +373,16 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
return old_pages;
/* Trying kmalloc first, falling back to vmalloc.
- * GFP_KERNEL is ok, as this is done when a lower level disk is
- * "attached" to the drbd. Context is receiver thread or cqueue
- * thread. As we have no disk yet, we are not in the IO path,
- * not even the IO path of the peer. */
+ * GFP_NOIO, as this is called while drbd IO is "suspended",
+ * and during resize or attach on diskless Primary,
+ * we must not block on IO to ourselves.
+ * Context is receiver thread or cqueue thread/dmsetup. */
bytes = sizeof(struct page *)*want;
- new_pages = kzalloc(bytes, GFP_KERNEL);
+ new_pages = kzalloc(bytes, GFP_NOIO);
if (!new_pages) {
- new_pages = vzalloc(bytes);
+ new_pages = __vmalloc(bytes,
+ GFP_NOIO | __GFP_HIGHMEM | __GFP_ZERO,
+ PAGE_KERNEL);
if (!new_pages)
return NULL;
vmalloced = 1;
@@ -390,7 +392,7 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)
for (i = 0; i < have; i++)
new_pages[i] = old_pages[i];
for (; i < want; i++) {
- page = alloc_page(GFP_HIGHUSER);
+ page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
if (!page) {
bm_free_pages(new_pages + have, i - have);
bm_vk_free(new_pages, vmalloced);
@@ -1088,7 +1090,7 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_w
* "in_flight reached zero, all done" event.
*/
if (!atomic_dec_and_test(&ctx->in_flight))
- wait_until_done_or_disk_failure(mdev, mdev->ldev, &ctx->done);
+ wait_until_done_or_force_detached(mdev, mdev->ldev, &ctx->done);
else
kref_put(&ctx->kref, &bm_aio_ctx_destroy);
@@ -1103,7 +1105,7 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_w
}
if (atomic_read(&ctx->in_flight))
- err = -EIO; /* Disk failed during IO... */
+ err = -EIO; /* Disk timeout/force-detach during IO... */
now = jiffies;
if (rw == WRITE) {
@@ -1222,11 +1224,11 @@ int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(loc
}
bm_page_io_async(ctx, idx, WRITE_SYNC);
- wait_until_done_or_disk_failure(mdev, mdev->ldev, &ctx->done);
+ wait_until_done_or_force_detached(mdev, mdev->ldev, &ctx->done);
if (ctx->error)
drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
- /* that should force detach, so the in memory bitmap will be
+ /* that causes us to detach, so the in memory bitmap will be
* gone in a moment as well. */
mdev->bm_writ_cnt++;
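One detail in the bm_realloc_pages() hunk above: vzalloc() hard-codes its gfp mask, so an NOIO allocation has to open-code __vmalloc() (three-argument form, as of this kernel). Roughly:

	/* what vzalloc(bytes) amounts to ...				*/
	new_pages = __vmalloc(bytes, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
	/* ... versus the deadlock-safe variant used by this patch	*/
	new_pages = __vmalloc(bytes, GFP_NOIO | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);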
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index b953cc7c9c00..277c69c9465b 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -808,7 +808,7 @@ enum {
#define EE_HAS_DIGEST (1<<__EE_HAS_DIGEST)
/* global flag bits */
-enum {
+enum drbd_flag {
CREATE_BARRIER, /* next P_DATA is preceded by a P_BARRIER */
SIGNAL_ASENDER, /* whether asender wants to be interrupted */
SEND_PING, /* whether asender should send a ping asap */
@@ -831,7 +831,8 @@ enum {
once no more io in flight, start bitmap io */
BITMAP_IO_QUEUED, /* Started bitmap IO */
GO_DISKLESS, /* Disk is being detached, on io-error or admin request. */
- WAS_IO_ERROR, /* Local disk failed returned IO error */
+ WAS_IO_ERROR, /* Local disk failed, returned IO error */
+ WAS_READ_ERROR, /* Local disk READ failed (set additionally to the above) */
FORCE_DETACH, /* Force-detach from local disk, aborting any pending local IO */
RESYNC_AFTER_NEG, /* Resync after online grow after the attach&negotiate finished. */
NET_CONGESTED, /* The data socket is congested */
@@ -857,6 +858,10 @@ enum {
* so shrink_page_list() would not recurse into,
* and potentially deadlock on, this drbd worker.
*/
+ DISCONNECT_SENT, /* Currently the last bit in this 32bit word */
+
+ /* keep last */
+ DRBD_N_FLAGS,
};
struct drbd_bitmap; /* opaque for drbd_conf */
@@ -912,6 +917,7 @@ struct drbd_md {
u64 md_offset; /* sector offset to 'super' block */
u64 la_size_sect; /* last agreed size, unit sectors */
+ spinlock_t uuid_lock;
u64 uuid[UI_SIZE];
u64 device_uuid;
u32 flags;
@@ -968,8 +974,7 @@ struct fifo_buffer {
};
struct drbd_conf {
- /* things that are stored as / read from meta data on disk */
- unsigned long flags;
+ unsigned long drbd_flags[(DRBD_N_FLAGS + BITS_PER_LONG -1)/BITS_PER_LONG];
/* configured by drbdsetup */
struct net_conf *net_conf; /* protected by get_net_conf() and put_net_conf() */
@@ -1051,6 +1056,7 @@ struct drbd_conf {
/* where does the admin want us to start? (sector) */
sector_t ov_start_sector;
+ sector_t ov_stop_sector;
/* where are we now? (sector) */
sector_t ov_position;
/* Start sector of out of sync range (to merge printk reporting). */
@@ -1140,6 +1146,31 @@ struct drbd_conf {
unsigned int local_max_bio_size;
};
+static inline void drbd_set_flag(struct drbd_conf *mdev, enum drbd_flag f)
+{
+ set_bit(f, &mdev->drbd_flags[0]);
+}
+
+static inline void drbd_clear_flag(struct drbd_conf *mdev, enum drbd_flag f)
+{
+ clear_bit(f, &mdev->drbd_flags[0]);
+}
+
+static inline int drbd_test_flag(struct drbd_conf *mdev, enum drbd_flag f)
+{
+ return test_bit(f, &mdev->drbd_flags[0]);
+}
+
+static inline int drbd_test_and_set_flag(struct drbd_conf *mdev, enum drbd_flag f)
+{
+ return test_and_set_bit(f, &mdev->drbd_flags[0]);
+}
+
+static inline int drbd_test_and_clear_flag(struct drbd_conf *mdev, enum drbd_flag f)
+{
+ return test_and_clear_bit(f, &mdev->drbd_flags[0]);
+}
+
static inline struct drbd_conf *minor_to_mdev(unsigned int minor)
{
struct drbd_conf *mdev;
@@ -1283,8 +1314,9 @@ extern int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev);
extern void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local);
extern void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local);
extern void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local);
-extern void _drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local);
extern void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local);
+extern void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local);
+extern void __drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local);
extern void drbd_md_set_flag(struct drbd_conf *mdev, int flags) __must_hold(local);
extern void drbd_md_clear_flag(struct drbd_conf *mdev, int flags)__must_hold(local);
extern int drbd_md_test_flag(struct drbd_backing_dev *, int);
@@ -1577,8 +1609,8 @@ extern void *drbd_md_get_buffer(struct drbd_conf *mdev);
extern void drbd_md_put_buffer(struct drbd_conf *mdev);
extern int drbd_md_sync_page_io(struct drbd_conf *mdev,
struct drbd_backing_dev *bdev, sector_t sector, int rw);
-extern void wait_until_done_or_disk_failure(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
- unsigned int *done);
+extern void wait_until_done_or_force_detached(struct drbd_conf *mdev,
+ struct drbd_backing_dev *bdev, unsigned int *done);
extern void drbd_ov_oos_found(struct drbd_conf*, sector_t, int);
extern void drbd_rs_controller_reset(struct drbd_conf *mdev);
@@ -1808,12 +1840,12 @@ static inline int drbd_ee_has_active_page(struct drbd_epoch_entry *e)
static inline void drbd_state_lock(struct drbd_conf *mdev)
{
wait_event(mdev->misc_wait,
- !test_and_set_bit(CLUSTER_ST_CHANGE, &mdev->flags));
+ !drbd_test_and_set_flag(mdev, CLUSTER_ST_CHANGE));
}
static inline void drbd_state_unlock(struct drbd_conf *mdev)
{
- clear_bit(CLUSTER_ST_CHANGE, &mdev->flags);
+ drbd_clear_flag(mdev, CLUSTER_ST_CHANGE);
wake_up(&mdev->misc_wait);
}
@@ -1848,31 +1880,54 @@ static inline int drbd_request_state(struct drbd_conf *mdev,
}
enum drbd_force_detach_flags {
- DRBD_IO_ERROR,
+ DRBD_READ_ERROR,
+ DRBD_WRITE_ERROR,
DRBD_META_IO_ERROR,
DRBD_FORCE_DETACH,
};
#define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__)
static inline void __drbd_chk_io_error_(struct drbd_conf *mdev,
- enum drbd_force_detach_flags forcedetach,
+ enum drbd_force_detach_flags df,
const char *where)
{
switch (mdev->ldev->dc.on_io_error) {
case EP_PASS_ON:
- if (forcedetach == DRBD_IO_ERROR) {
+ if (df == DRBD_READ_ERROR || df == DRBD_WRITE_ERROR) {
if (__ratelimit(&drbd_ratelimit_state))
dev_err(DEV, "Local IO failed in %s.\n", where);
if (mdev->state.disk > D_INCONSISTENT)
_drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_HARD, NULL);
break;
}
- /* NOTE fall through to detach case if forcedetach set */
+ /* NOTE fall through for DRBD_META_IO_ERROR or DRBD_FORCE_DETACH */
case EP_DETACH:
case EP_CALL_HELPER:
- set_bit(WAS_IO_ERROR, &mdev->flags);
- if (forcedetach == DRBD_FORCE_DETACH)
- set_bit(FORCE_DETACH, &mdev->flags);
+ /* Remember whether we saw a READ or WRITE error.
+ *
+ * Recovery of the affected area for WRITE failure is covered
+ * by the activity log.
+ * READ errors may fall outside that area though. Certain READ
+ * errors can be "healed" by writing good data to the affected
+ * blocks, which triggers block re-allocation in lower layers.
+ *
+ * If we can not write the bitmap after a READ error,
+ * we may need to trigger a full sync (see w_go_diskless()).
+ *
+ * Force-detach is not really an IO error, but rather a
+ * desperate measure to try to deal with a completely
+ * unresponsive lower level IO stack.
+ * Still it should be treated as a WRITE error.
+ *
+ * Meta IO error is always WRITE error:
+ * we read meta data only once during attach,
+ * which will fail in case of errors.
+ */
+ drbd_set_flag(mdev, WAS_IO_ERROR);
+ if (df == DRBD_READ_ERROR)
+ drbd_set_flag(mdev, WAS_READ_ERROR);
+ if (df == DRBD_FORCE_DETACH)
+ drbd_set_flag(mdev, FORCE_DETACH);
if (mdev->state.disk > D_FAILED) {
_drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL);
dev_err(DEV,
@@ -2033,13 +2088,13 @@ drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
static inline void wake_asender(struct drbd_conf *mdev)
{
- if (test_bit(SIGNAL_ASENDER, &mdev->flags))
+ if (drbd_test_flag(mdev, SIGNAL_ASENDER))
force_sig(DRBD_SIG, mdev->asender.task);
}
static inline void request_ping(struct drbd_conf *mdev)
{
- set_bit(SEND_PING, &mdev->flags);
+ drbd_set_flag(mdev, SEND_PING);
wake_asender(mdev);
}
@@ -2370,7 +2425,7 @@ static inline bool may_inc_ap_bio(struct drbd_conf *mdev)
if (is_susp(mdev->state))
return false;
- if (test_bit(SUSPEND_IO, &mdev->flags))
+ if (drbd_test_flag(mdev, SUSPEND_IO))
return false;
/* to avoid potential deadlock or bitmap corruption,
@@ -2385,7 +2440,7 @@ static inline bool may_inc_ap_bio(struct drbd_conf *mdev)
* and we are within the spinlock anyways, we have this workaround. */
if (atomic_read(&mdev->ap_bio_cnt) > mxb)
return false;
- if (test_bit(BITMAP_IO, &mdev->flags))
+ if (drbd_test_flag(mdev, BITMAP_IO))
return false;
return true;
}
@@ -2423,8 +2478,8 @@ static inline void dec_ap_bio(struct drbd_conf *mdev)
D_ASSERT(ap_bio >= 0);
- if (ap_bio == 0 && test_bit(BITMAP_IO, &mdev->flags)) {
- if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
+ if (ap_bio == 0 && drbd_test_flag(mdev, BITMAP_IO)) {
+ if (!drbd_test_and_set_flag(mdev, BITMAP_IO_QUEUED))
drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
}
@@ -2473,7 +2528,7 @@ static inline void drbd_update_congested(struct drbd_conf *mdev)
{
struct sock *sk = mdev->data.socket->sk;
if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
- set_bit(NET_CONGESTED, &mdev->flags);
+ drbd_set_flag(mdev, NET_CONGESTED);
}
static inline int drbd_queue_order_type(struct drbd_conf *mdev)
@@ -2490,14 +2545,15 @@ static inline void drbd_md_flush(struct drbd_conf *mdev)
{
int r;
- if (test_bit(MD_NO_FUA, &mdev->flags))
+ if (drbd_test_flag(mdev, MD_NO_FUA))
return;
- r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_KERNEL, NULL);
+ r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_NOIO, NULL);
if (r) {
- set_bit(MD_NO_FUA, &mdev->flags);
+ drbd_set_flag(mdev, MD_NO_FUA);
dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r);
}
}
+
#endif
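The drbd_flags[] declaration above is the standard round-up-to-longs idiom; with the kernel's bitmap helpers the same storage could be written as (a sketch, not part of this patch):

	unsigned long drbd_flags[BITS_TO_LONGS(DRBD_N_FLAGS)];	/* linux/bitops.h */
	/* or equivalently: */
	DECLARE_BITMAP(drbd_flags, DRBD_N_FLAGS);		/* linux/types.h */

Either way, routing all access through the new drbd_set_flag()/drbd_test_flag() helpers means callers pass an enum drbd_flag instead of poking a raw flags word, and the flag set can now grow past one 32-bit word without breaking 32-bit builds.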
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index f93a0320e952..9b833e0fb440 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -162,23 +162,12 @@ static const struct block_device_operations drbd_ops = {
.release = drbd_release,
};
-static void bio_destructor_drbd(struct bio *bio)
-{
- bio_free(bio, drbd_md_io_bio_set);
-}
-
struct bio *bio_alloc_drbd(gfp_t gfp_mask)
{
- struct bio *bio;
-
if (!drbd_md_io_bio_set)
return bio_alloc(gfp_mask, 1);
- bio = bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set);
- if (!bio)
- return NULL;
- bio->bi_destructor = bio_destructor_drbd;
- return bio;
+ return bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set);
}
#ifdef __CHECKER__
@@ -333,7 +322,7 @@ void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
list_splice_init(&b->requests, &mdev->barrier_acked_requests);
nob = b->next;
- if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
+ if (drbd_test_and_clear_flag(mdev, CREATE_BARRIER)) {
_tl_add_barrier(mdev, b);
if (nob)
mdev->oldest_tle = nob;
@@ -392,7 +381,7 @@ static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
if (b->w.cb == NULL) {
b->w.cb = w_send_barrier;
inc_ap_pending(mdev);
- set_bit(CREATE_BARRIER, &mdev->flags);
+ drbd_set_flag(mdev, CREATE_BARRIER);
}
drbd_queue_work(&mdev->data.work, &b->w);
@@ -475,7 +464,7 @@ static void _tl_clear(struct drbd_conf *mdev)
}
/* ensure bit indicating barrier is required is clear */
- clear_bit(CREATE_BARRIER, &mdev->flags);
+ drbd_clear_flag(mdev, CREATE_BARRIER);
memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void *));
@@ -593,10 +582,10 @@ _req_st_cond(struct drbd_conf *mdev, union drbd_state mask,
unsigned long flags;
enum drbd_state_rv rv;
- if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags))
+ if (drbd_test_and_clear_flag(mdev, CL_ST_CHG_SUCCESS))
return SS_CW_SUCCESS;
- if (test_and_clear_bit(CL_ST_CHG_FAIL, &mdev->flags))
+ if (drbd_test_and_clear_flag(mdev, CL_ST_CHG_FAIL))
return SS_CW_FAILED_BY_PEER;
rv = 0;
@@ -670,6 +659,9 @@ drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
goto abort;
}
+ if (mask.conn == C_MASK && val.conn == C_DISCONNECTING)
+ drbd_set_flag(mdev, DISCONNECT_SENT);
+
wait_event(mdev->state_wait,
(rv = _req_st_cond(mdev, mask, val)));
@@ -858,7 +850,7 @@ is_valid_state_transition(struct drbd_conf *mdev, union drbd_state ns,
/* While establishing a connection only allow cstate to change.
Delay/refuse role changes, detach attach etc... */
- if (test_bit(STATE_SENT, &mdev->flags) &&
+ if (drbd_test_flag(mdev, STATE_SENT) &&
!(os.conn == C_WF_REPORT_PARAMS ||
(ns.conn == C_WF_REPORT_PARAMS && os.conn == C_WF_CONNECTION)))
rv = SS_IN_TRANSIENT_STATE;
@@ -1117,7 +1109,7 @@ static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
static void drbd_resume_al(struct drbd_conf *mdev)
{
- if (test_and_clear_bit(AL_SUSPENDED, &mdev->flags))
+ if (drbd_test_and_clear_flag(mdev, AL_SUSPENDED))
dev_info(DEV, "Resumed AL updates\n");
}
@@ -1223,8 +1215,8 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
if (ns.disk == D_DISKLESS &&
ns.conn == C_STANDALONE &&
ns.role == R_SECONDARY &&
- !test_and_set_bit(CONFIG_PENDING, &mdev->flags))
- set_bit(DEVICE_DYING, &mdev->flags);
+ !drbd_test_and_set_flag(mdev, CONFIG_PENDING))
+ drbd_set_flag(mdev, DEVICE_DYING);
/* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
* on the ldev here, to be sure the transition -> D_DISKLESS resp.
@@ -1242,13 +1234,15 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
wake_up(&mdev->misc_wait);
wake_up(&mdev->state_wait);
- /* aborted verify run. log the last position */
+ /* Aborted verify run, or we reached the stop sector.
+ * Log the last position, unless end-of-device. */
if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) &&
- ns.conn < C_CONNECTED) {
+ ns.conn <= C_CONNECTED) {
mdev->ov_start_sector =
BM_BIT_TO_SECT(drbd_bm_bits(mdev) - mdev->ov_left);
- dev_info(DEV, "Online Verify reached sector %llu\n",
- (unsigned long long)mdev->ov_start_sector);
+ if (mdev->ov_left)
+ dev_info(DEV, "Online Verify reached sector %llu\n",
+ (unsigned long long)mdev->ov_start_sector);
}
if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) &&
@@ -1297,7 +1291,7 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
MDF_CONNECTED_IND|MDF_WAS_UP_TO_DATE|
MDF_PEER_OUT_DATED|MDF_CRASHED_PRIMARY);
- if (test_bit(CRASHED_PRIMARY, &mdev->flags))
+ if (drbd_test_flag(mdev, CRASHED_PRIMARY))
mdf |= MDF_CRASHED_PRIMARY;
if (mdev->state.role == R_PRIMARY ||
(mdev->state.pdsk < D_INCONSISTENT && mdev->state.peer == R_PRIMARY))
@@ -1322,7 +1316,7 @@ __drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
/* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider to resync */
if (os.disk == D_INCONSISTENT && os.pdsk == D_INCONSISTENT &&
os.peer == R_SECONDARY && ns.peer == R_PRIMARY)
- set_bit(CONSIDER_RESYNC, &mdev->flags);
+ drbd_set_flag(mdev, CONSIDER_RESYNC);
/* Receiver should clean up itself */
if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
@@ -1406,7 +1400,7 @@ int drbd_bitmap_io_from_worker(struct drbd_conf *mdev,
D_ASSERT(current == mdev->worker.task);
/* open coded non-blocking drbd_suspend_io(mdev); */
- set_bit(SUSPEND_IO, &mdev->flags);
+ drbd_set_flag(mdev, SUSPEND_IO);
drbd_bm_lock(mdev, why, flags);
rv = io_fn(mdev);
@@ -1432,7 +1426,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
union drbd_state nsm = (union drbd_state){ .i = -1 };
if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) {
- clear_bit(CRASHED_PRIMARY, &mdev->flags);
+ drbd_clear_flag(mdev, CRASHED_PRIMARY);
if (mdev->p_uuid)
mdev->p_uuid[UI_FLAGS] &= ~((u64)2);
}
@@ -1472,9 +1466,9 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
if (ns.susp_fen) {
/* case1: The outdate peer handler is successful: */
if (os.pdsk > D_OUTDATED && ns.pdsk <= D_OUTDATED) {
- if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
+ if (drbd_test_flag(mdev, NEW_CUR_UUID)) {
drbd_uuid_new_current(mdev);
- clear_bit(NEW_CUR_UUID, &mdev->flags);
+ drbd_clear_flag(mdev, NEW_CUR_UUID);
}
spin_lock_irq(&mdev->req_lock);
_tl_clear(mdev);
@@ -1483,7 +1477,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
}
/* case2: The connection was established again: */
if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
- clear_bit(NEW_CUR_UUID, &mdev->flags);
+ drbd_clear_flag(mdev, NEW_CUR_UUID);
what = resend;
nsm.susp_fen = 0;
}
@@ -1540,7 +1534,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
if (is_susp(mdev->state)) {
- set_bit(NEW_CUR_UUID, &mdev->flags);
+ drbd_set_flag(mdev, NEW_CUR_UUID);
} else {
drbd_uuid_new_current(mdev);
drbd_send_uuids(mdev);
@@ -1623,17 +1617,20 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
/* first half of local IO error, failure to attach,
* or administrative detach */
if (os.disk != D_FAILED && ns.disk == D_FAILED) {
- enum drbd_io_error_p eh = EP_PASS_ON;
- int was_io_error = 0;
/* corresponding get_ldev was in __drbd_set_state, to serialize
* our cleanup here with the transition to D_DISKLESS.
- * But is is still not save to dreference ldev here, since
- * we might come from an failed Attach before ldev was set. */
+ * But it is still not safe to dereference ldev here; we may end
+ * up here from a failed attach, before ldev was even set. */
if (mdev->ldev) {
- eh = mdev->ldev->dc.on_io_error;
- was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
-
- if (was_io_error && eh == EP_CALL_HELPER)
+ enum drbd_io_error_p eh = mdev->ldev->dc.on_io_error;
+
+ /* In some setups, this handler triggers a suicide,
+ * basically mapping IO error to node failure, to
+ * reduce the number of different failure scenarios.
+ *
+ * This handler intentionally runs before we abort IO,
+ * notify the peer, or try to update our meta data. */
+ if (eh == EP_CALL_HELPER && drbd_test_flag(mdev, WAS_IO_ERROR))
drbd_khelper(mdev, "local-io-error");
/* Immediately allow completion of all application IO,
@@ -1649,7 +1646,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
* So aborting local requests may cause crashes,
* or even worse, silent data corruption.
*/
- if (test_and_clear_bit(FORCE_DETACH, &mdev->flags))
+ if (drbd_test_flag(mdev, FORCE_DETACH))
tl_abort_disk_io(mdev);
/* current state still has to be D_FAILED,
@@ -1698,7 +1695,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
/* Disks got bigger while they were detached */
if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
- test_and_clear_bit(RESYNC_AFTER_NEG, &mdev->flags)) {
+ drbd_test_and_clear_flag(mdev, RESYNC_AFTER_NEG)) {
if (ns.conn == C_CONNECTED)
resync_after_online_grow(mdev);
}
@@ -1714,9 +1711,16 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED)
drbd_send_state(mdev, ns);
+ /* Verify finished, or reached stop sector. Peer did not know about
+ * the stop sector, and we may even have changed the stop sector during
+ * verify to interrupt/stop early. Send the new state. */
+ if (os.conn == C_VERIFY_S && ns.conn == C_CONNECTED
+ && mdev->agreed_pro_version >= 97)
+ drbd_send_state(mdev, ns);
+
/* Wake up role changes, that were delayed because of connection establishing */
if (os.conn == C_WF_REPORT_PARAMS && ns.conn != C_WF_REPORT_PARAMS) {
- clear_bit(STATE_SENT, &mdev->flags);
+ drbd_clear_flag(mdev, STATE_SENT);
wake_up(&mdev->state_wait);
}
@@ -1749,7 +1753,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
if (os.aftr_isp != ns.aftr_isp)
resume_next_sg(mdev);
/* set in __drbd_set_state, unless CONFIG_PENDING was set */
- if (test_bit(DEVICE_DYING, &mdev->flags))
+ if (drbd_test_flag(mdev, DEVICE_DYING))
drbd_thread_stop_nowait(&mdev->worker);
}
@@ -2136,13 +2140,15 @@ int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
if (!get_ldev_if_state(mdev, D_NEGOTIATING))
return 1;
+ spin_lock_irq(&mdev->ldev->md.uuid_lock);
for (i = UI_CURRENT; i < UI_SIZE; i++)
p.uuid[i] = mdev->ldev ? cpu_to_be64(mdev->ldev->md.uuid[i]) : 0;
+ spin_unlock_irq(&mdev->ldev->md.uuid_lock);
mdev->comm_bm_set = drbd_bm_total_weight(mdev);
p.uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
uuid_flags |= mdev->net_conf->want_lose ? 1 : 0;
- uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
+ uuid_flags |= drbd_test_flag(mdev, CRASHED_PRIMARY) ? 2 : 0;
uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
p.uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);
@@ -2772,7 +2778,7 @@ static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
offset += sent;
} while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED*/);
set_fs(oldfs);
- clear_bit(NET_CONGESTED, &mdev->flags);
+ drbd_clear_flag(mdev, NET_CONGESTED);
ok = (len == 0);
if (likely(ok))
@@ -2874,7 +2880,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
dp_flags |= DP_MAY_SET_IN_SYNC;
p.dp_flags = cpu_to_be32(dp_flags);
- set_bit(UNPLUG_REMOTE, &mdev->flags);
+ drbd_set_flag(mdev, UNPLUG_REMOTE);
ok = (sizeof(p) ==
drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0));
if (ok && dgs) {
@@ -3053,7 +3059,7 @@ int drbd_send(struct drbd_conf *mdev, struct socket *sock,
} while (sent < size);
if (sock == mdev->data.socket)
- clear_bit(NET_CONGESTED, &mdev->flags);
+ drbd_clear_flag(mdev, NET_CONGESTED);
if (rv <= 0) {
if (rv != -EAGAIN) {
@@ -3260,7 +3266,7 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
}
drbd_free_resources(mdev);
- clear_bit(AL_SUSPENDED, &mdev->flags);
+ drbd_clear_flag(mdev, AL_SUSPENDED);
/*
* currently we drbd_init_ee only on module load, so
@@ -3553,7 +3559,7 @@ static int drbd_congested(void *congested_data, int bdi_bits)
goto out;
}
- if (test_bit(CALLBACK_PENDING, &mdev->flags)) {
+ if (drbd_test_flag(mdev, CALLBACK_PENDING)) {
r |= (1 << BDI_async_congested);
/* Without good local data, we would need to read from remote,
* and that would need the worker thread as well, which is
@@ -3577,7 +3583,7 @@ static int drbd_congested(void *congested_data, int bdi_bits)
reason = 'b';
}
- if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->flags)) {
+ if (bdi_bits & (1 << BDI_async_congested) && drbd_test_flag(mdev, NET_CONGESTED)) {
r |= (1 << BDI_async_congested);
reason = reason == 'b' ? 'a' : 'n';
}
@@ -3864,7 +3870,7 @@ void drbd_md_sync(struct drbd_conf *mdev)
del_timer(&mdev->md_sync_timer);
/* timer may be rearmed by drbd_md_mark_dirty() now. */
- if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
+ if (!drbd_test_and_clear_flag(mdev, MD_DIRTY))
return;
/* We use here D_FAILED and not D_ATTACHING because we try to write
@@ -4008,7 +4014,7 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
#ifdef DEBUG
void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *func)
{
- if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) {
+ if (!drbd_test_and_set_flag(mdev, MD_DIRTY)) {
mod_timer(&mdev->md_sync_timer, jiffies + HZ);
mdev->last_md_mark_dirty.line = line;
mdev->last_md_mark_dirty.func = func;
@@ -4017,12 +4023,12 @@ void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *
#else
void drbd_md_mark_dirty(struct drbd_conf *mdev)
{
- if (!test_and_set_bit(MD_DIRTY, &mdev->flags))
+ if (!drbd_test_and_set_flag(mdev, MD_DIRTY))
mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ);
}
#endif
-static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
+void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
{
int i;
@@ -4030,7 +4036,7 @@ static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
}
-void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
+void __drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
{
if (idx == UI_CURRENT) {
if (mdev->state.role == R_PRIMARY)
@@ -4045,14 +4051,24 @@ void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
drbd_md_mark_dirty(mdev);
}
+void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&mdev->ldev->md.uuid_lock, flags);
+ __drbd_uuid_set(mdev, idx, val);
+ spin_unlock_irqrestore(&mdev->ldev->md.uuid_lock, flags);
+}
void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
{
+ unsigned long flags;
+ spin_lock_irqsave(&mdev->ldev->md.uuid_lock, flags);
if (mdev->ldev->md.uuid[idx]) {
drbd_uuid_move_history(mdev);
mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
}
- _drbd_uuid_set(mdev, idx, val);
+ __drbd_uuid_set(mdev, idx, val);
+ spin_unlock_irqrestore(&mdev->ldev->md.uuid_lock, flags);
}
/**
@@ -4065,15 +4081,20 @@ void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
{
u64 val;
- unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
+ unsigned long long bm_uuid;
+
+ get_random_bytes(&val, sizeof(u64));
+
+ spin_lock_irq(&mdev->ldev->md.uuid_lock);
+ bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
if (bm_uuid)
dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);
mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT];
+ __drbd_uuid_set(mdev, UI_CURRENT, val);
+ spin_unlock_irq(&mdev->ldev->md.uuid_lock);
- get_random_bytes(&val, sizeof(u64));
- _drbd_uuid_set(mdev, UI_CURRENT, val);
drbd_print_uuids(mdev, "new current UUID");
/* get it to stable storage _now_ */
drbd_md_sync(mdev);
@@ -4081,9 +4102,11 @@ void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
{
+ unsigned long flags;
if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
return;
+ spin_lock_irqsave(&mdev->ldev->md.uuid_lock, flags);
if (val == 0) {
drbd_uuid_move_history(mdev);
mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
@@ -4095,6 +4118,8 @@ void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
mdev->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
}
+ spin_unlock_irqrestore(&mdev->ldev->md.uuid_lock, flags);
+
drbd_md_mark_dirty(mdev);
}
@@ -4160,14 +4185,14 @@ static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused)
put_ldev(mdev);
}
- clear_bit(BITMAP_IO, &mdev->flags);
+ drbd_clear_flag(mdev, BITMAP_IO);
smp_mb__after_clear_bit();
wake_up(&mdev->misc_wait);
if (work->done)
work->done(mdev, rv);
- clear_bit(BITMAP_IO_QUEUED, &mdev->flags);
+ drbd_clear_flag(mdev, BITMAP_IO_QUEUED);
work->why = NULL;
work->flags = 0;
@@ -4188,7 +4213,7 @@ void drbd_ldev_destroy(struct drbd_conf *mdev)
__free_page(mdev->md_io_tmpp);
mdev->md_io_tmpp = NULL;
}
- clear_bit(GO_DISKLESS, &mdev->flags);
+ drbd_clear_flag(mdev, GO_DISKLESS);
}
static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused)
@@ -4198,6 +4223,26 @@ static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused
* inc/dec it frequently. Once we are D_DISKLESS, no one will touch
* the protected members anymore, though, so once put_ldev reaches zero
* again, it will be safe to free them. */
+
+ /* Try to write changed bitmap pages, read errors may have just
+ * set some bits outside the area covered by the activity log.
+ *
+ * If we have an IO error during the bitmap writeout,
+ * we will want a full sync next time, just in case.
+ * (Do we want a specific meta data flag for this?)
+ *
+ * If that does not make it to stable storage either,
+ * we cannot do anything about that anymore. */
+ if (mdev->bitmap) {
+ if (drbd_bitmap_io_from_worker(mdev, drbd_bm_write,
+ "detach", BM_LOCKED_MASK)) {
+ if (drbd_test_flag(mdev, WAS_READ_ERROR)) {
+ drbd_md_set_flag(mdev, MDF_FULL_SYNC);
+ drbd_md_sync(mdev);
+ }
+ }
+ }
+
drbd_force_state(mdev, NS(disk, D_DISKLESS));
return 1;
}
@@ -4205,7 +4250,7 @@ static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused
void drbd_go_diskless(struct drbd_conf *mdev)
{
D_ASSERT(mdev->state.disk == D_FAILED);
- if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
+ if (!drbd_test_and_set_flag(mdev, GO_DISKLESS))
drbd_queue_work(&mdev->data.work, &mdev->go_diskless);
}
@@ -4228,8 +4273,8 @@ void drbd_queue_bitmap_io(struct drbd_conf *mdev,
{
D_ASSERT(current == mdev->worker.task);
- D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags));
- D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags));
+ D_ASSERT(!drbd_test_flag(mdev, BITMAP_IO_QUEUED));
+ D_ASSERT(!drbd_test_flag(mdev, BITMAP_IO));
D_ASSERT(list_empty(&mdev->bm_io_work.w.list));
if (mdev->bm_io_work.why)
dev_err(DEV, "FIXME going to queue '%s' but '%s' still pending?\n",
@@ -4241,9 +4286,9 @@ void drbd_queue_bitmap_io(struct drbd_conf *mdev,
mdev->bm_io_work.flags = flags;
spin_lock_irq(&mdev->req_lock);
- set_bit(BITMAP_IO, &mdev->flags);
+ drbd_set_flag(mdev, BITMAP_IO);
if (atomic_read(&mdev->ap_bio_cnt) == 0) {
- if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
+ if (!drbd_test_and_set_flag(mdev, BITMAP_IO_QUEUED))
drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
}
spin_unlock_irq(&mdev->req_lock);
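The uuid hunks above follow the kernel's usual locked/unlocked naming split: __drbd_uuid_set() requires md.uuid_lock to be held, while the single-underscore _drbd_uuid_set() takes the lock itself. The shape of the pattern, with made-up names:

	static void __update(struct obj *o, u64 val)	/* caller holds o->lock */
	{
		o->field = val;
	}

	static void update(struct obj *o, u64 val)	/* IRQ-safe, takes the lock */
	{
		unsigned long flags;

		spin_lock_irqsave(&o->lock, flags);
		__update(o, val);
		spin_unlock_irqrestore(&o->lock, flags);
	}

Composite updates such as drbd_uuid_set() and drbd_uuid_new_current() take the lock once and call only the double-underscore helper inside, so the multi-field history rotation stays atomic.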
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index edb490aad8b4..c8dda4e8dfce 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -148,7 +148,7 @@ int drbd_khelper(struct drbd_conf *mdev, char *cmd)
int ret;
if (current == mdev->worker.task)
- set_bit(CALLBACK_PENDING, &mdev->flags);
+ drbd_set_flag(mdev, CALLBACK_PENDING);
snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
@@ -193,7 +193,7 @@ int drbd_khelper(struct drbd_conf *mdev, char *cmd)
(ret >> 8) & 0xff, ret);
if (current == mdev->worker.task)
- clear_bit(CALLBACK_PENDING, &mdev->flags);
+ drbd_clear_flag(mdev, CALLBACK_PENDING);
if (ret < 0) /* Ignore any ERRNOs we got. */
ret = 0;
@@ -295,7 +295,7 @@ static int _try_outdate_peer_async(void *data)
*/
spin_lock_irq(&mdev->req_lock);
ns = mdev->state;
- if (ns.conn < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &mdev->flags)) {
+ if (ns.conn < C_WF_REPORT_PARAMS && !drbd_test_flag(mdev, STATE_SENT)) {
ns.pdsk = nps;
_drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
}
@@ -583,7 +583,7 @@ char *ppsize(char *buf, unsigned long long size)
*/
void drbd_suspend_io(struct drbd_conf *mdev)
{
- set_bit(SUSPEND_IO, &mdev->flags);
+ drbd_set_flag(mdev, SUSPEND_IO);
if (is_susp(mdev->state))
return;
wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
@@ -591,7 +591,7 @@ void drbd_suspend_io(struct drbd_conf *mdev)
void drbd_resume_io(struct drbd_conf *mdev)
{
- clear_bit(SUSPEND_IO, &mdev->flags);
+ drbd_clear_flag(mdev, SUSPEND_IO);
wake_up(&mdev->misc_wait);
}
@@ -881,8 +881,8 @@ void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
*/
static void drbd_reconfig_start(struct drbd_conf *mdev)
{
- wait_event(mdev->state_wait, !test_and_set_bit(CONFIG_PENDING, &mdev->flags));
- wait_event(mdev->state_wait, !test_bit(DEVICE_DYING, &mdev->flags));
+ wait_event(mdev->state_wait, !drbd_test_and_set_flag(mdev, CONFIG_PENDING));
+ wait_event(mdev->state_wait, !drbd_test_flag(mdev, DEVICE_DYING));
drbd_thread_start(&mdev->worker);
drbd_flush_workqueue(mdev);
}
@@ -896,10 +896,10 @@ static void drbd_reconfig_done(struct drbd_conf *mdev)
if (mdev->state.disk == D_DISKLESS &&
mdev->state.conn == C_STANDALONE &&
mdev->state.role == R_SECONDARY) {
- set_bit(DEVICE_DYING, &mdev->flags);
+ drbd_set_flag(mdev, DEVICE_DYING);
drbd_thread_stop_nowait(&mdev->worker);
} else
- clear_bit(CONFIG_PENDING, &mdev->flags);
+ drbd_clear_flag(mdev, CONFIG_PENDING);
spin_unlock_irq(&mdev->req_lock);
wake_up(&mdev->state_wait);
}
@@ -919,7 +919,7 @@ static void drbd_suspend_al(struct drbd_conf *mdev)
spin_lock_irq(&mdev->req_lock);
if (mdev->state.conn < C_CONNECTED)
- s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);
+ s = !drbd_test_and_set_flag(mdev, AL_SUSPENDED);
spin_unlock_irq(&mdev->req_lock);
@@ -958,7 +958,9 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
/* make sure there is no leftover from previous force-detach attempts */
- clear_bit(FORCE_DETACH, &mdev->flags);
+ drbd_clear_flag(mdev, FORCE_DETACH);
+ drbd_clear_flag(mdev, WAS_IO_ERROR);
+ drbd_clear_flag(mdev, WAS_READ_ERROR);
/* and no leftover from previously aborted resync or verify, either */
mdev->rs_total = 0;
@@ -977,6 +979,8 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
nbc->dc.fencing = DRBD_FENCING_DEF;
nbc->dc.max_bio_bvecs = DRBD_MAX_BIO_BVECS_DEF;
+ spin_lock_init(&nbc->md.uuid_lock);
+
if (!disk_conf_from_tags(mdev, nlp->tag_list, &nbc->dc)) {
retcode = ERR_MANDATORY_TAG;
goto fail;
@@ -1166,9 +1170,9 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
/* Reset the "barriers don't work" bits here, then force meta data to
* be written, to ensure we determine if barriers are supported. */
if (nbc->dc.no_md_flush)
- set_bit(MD_NO_FUA, &mdev->flags);
+ drbd_set_flag(mdev, MD_NO_FUA);
else
- clear_bit(MD_NO_FUA, &mdev->flags);
+ drbd_clear_flag(mdev, MD_NO_FUA);
/* Point of no return reached.
* Devices and memory are no longer released by error cleanup below.
@@ -1184,13 +1188,13 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
drbd_bump_write_ordering(mdev, WO_bdev_flush);
if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
- set_bit(CRASHED_PRIMARY, &mdev->flags);
+ drbd_set_flag(mdev, CRASHED_PRIMARY);
else
- clear_bit(CRASHED_PRIMARY, &mdev->flags);
+ drbd_clear_flag(mdev, CRASHED_PRIMARY);
if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
!(mdev->state.role == R_PRIMARY && mdev->state.susp_nod)) {
- set_bit(CRASHED_PRIMARY, &mdev->flags);
+ drbd_set_flag(mdev, CRASHED_PRIMARY);
cp_discovered = 1;
}
@@ -1215,18 +1219,18 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
* so we can automatically recover from a crash of a
* degraded but active "cluster" after a certain timeout.
*/
- clear_bit(USE_DEGR_WFC_T, &mdev->flags);
+ drbd_clear_flag(mdev, USE_DEGR_WFC_T);
if (mdev->state.role != R_PRIMARY &&
drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
!drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
- set_bit(USE_DEGR_WFC_T, &mdev->flags);
+ drbd_set_flag(mdev, USE_DEGR_WFC_T);
dd = drbd_determine_dev_size(mdev, 0);
if (dd == dev_size_error) {
retcode = ERR_NOMEM_BITMAP;
goto force_diskless_dec;
} else if (dd == grew)
- set_bit(RESYNC_AFTER_NEG, &mdev->flags);
+ drbd_set_flag(mdev, RESYNC_AFTER_NEG);
if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
dev_info(DEV, "Assuming that all blocks are out of sync "
@@ -1360,7 +1364,7 @@ static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
}
if (dt.detach_force) {
- set_bit(FORCE_DETACH, &mdev->flags);
+ drbd_set_flag(mdev, FORCE_DETACH);
drbd_force_state(mdev, NS(disk, D_FAILED));
reply->ret_code = SS_SUCCESS;
goto out;
@@ -1705,7 +1709,7 @@ void resync_after_online_grow(struct drbd_conf *mdev)
if (mdev->state.role != mdev->state.peer)
iass = (mdev->state.role == R_PRIMARY);
else
- iass = test_bit(DISCARD_CONCURRENT, &mdev->flags);
+ iass = drbd_test_flag(mdev, DISCARD_CONCURRENT);
if (iass)
drbd_start_resync(mdev, C_SYNC_SOURCE);
@@ -1763,7 +1767,7 @@ static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
if (mdev->state.conn == C_CONNECTED) {
if (dd == grew)
- set_bit(RESIZE_PENDING, &mdev->flags);
+ drbd_set_flag(mdev, RESIZE_PENDING);
drbd_send_uuids(mdev);
drbd_send_sizes(mdev, 1, ddsf);
@@ -1981,7 +1985,7 @@ static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
* resync just being finished, wait for it before requesting a new resync.
* Also wait for its after_state_ch(). */
drbd_suspend_io(mdev);
- wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
+ wait_event(mdev->misc_wait, !drbd_test_flag(mdev, BITMAP_IO));
drbd_flush_workqueue(mdev);
retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);
@@ -2024,7 +2028,7 @@ static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_re
* resync just being finished, wait for it before requesting a new resync.
* Also wait for its after_state_ch(). */
drbd_suspend_io(mdev);
- wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
+ wait_event(mdev->misc_wait, !drbd_test_flag(mdev, BITMAP_IO));
drbd_flush_workqueue(mdev);
retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED);
@@ -2092,9 +2096,9 @@ static int drbd_nl_suspend_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
static int drbd_nl_resume_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
struct drbd_nl_cfg_reply *reply)
{
- if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
+ if (drbd_test_flag(mdev, NEW_CUR_UUID)) {
drbd_uuid_new_current(mdev);
- clear_bit(NEW_CUR_UUID, &mdev->flags);
+ drbd_clear_flag(mdev, NEW_CUR_UUID);
}
drbd_suspend_io(mdev);
reply->ret_code = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
@@ -2170,8 +2174,11 @@ static int drbd_nl_get_uuids(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
tl = reply->tag_list;
if (get_ldev(mdev)) {
+ unsigned long flags;
+ spin_lock_irqsave(&mdev->ldev->md.uuid_lock, flags);
tl = tl_add_blob(tl, T_uuids, mdev->ldev->md.uuid, UI_SIZE*sizeof(u64));
tl = tl_add_int(tl, T_uuids_flags, &mdev->ldev->md.flags);
+ spin_unlock_irqrestore(&mdev->ldev->md.uuid_lock, flags);
put_ldev(mdev);
}
put_unaligned(TT_END, tl++); /* Close the tag list */
@@ -2194,7 +2201,7 @@ static int drbd_nl_get_timeout_flag(struct drbd_conf *mdev, struct drbd_nl_cfg_r
tl = reply->tag_list;
rv = mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
- test_bit(USE_DEGR_WFC_T, &mdev->flags) ? UT_DEGRADED : UT_DEFAULT;
+ drbd_test_flag(mdev, USE_DEGR_WFC_T) ? UT_DEGRADED : UT_DEFAULT;
tl = tl_add_blob(tl, T_use_degraded, &rv, sizeof(rv));
put_unaligned(TT_END, tl++); /* Close the tag list */
@@ -2206,8 +2213,10 @@ static int drbd_nl_start_ov(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
struct drbd_nl_cfg_reply *reply)
{
/* default to resume from last known position, if possible */
- struct start_ov args =
- { .start_sector = mdev->ov_start_sector };
+ struct start_ov args = {
+ .start_sector = mdev->ov_start_sector,
+ .stop_sector = ULLONG_MAX,
+ };
if (!start_ov_from_tags(mdev, nlp->tag_list, &args)) {
reply->ret_code = ERR_MANDATORY_TAG;
@@ -2217,10 +2226,11 @@ static int drbd_nl_start_ov(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
/* If there is still bitmap IO pending, e.g. previous resync or verify
* just being finished, wait for it before requesting a new resync. */
drbd_suspend_io(mdev);
- wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
+ wait_event(mdev->misc_wait, !drbd_test_flag(mdev, BITMAP_IO));
- /* w_make_ov_request expects position to be aligned */
- mdev->ov_start_sector = args.start_sector & ~BM_SECT_PER_BIT;
+ /* w_make_ov_request expects start position to be aligned */
+ mdev->ov_start_sector = args.start_sector & ~(BM_SECT_PER_BIT-1);
+ mdev->ov_stop_sector = args.stop_sector;
reply->ret_code = drbd_request_state(mdev,NS(conn,C_VERIFY_S));
drbd_resume_io(mdev);
return 0;
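The start_ov hunk above also fixes an alignment bug: '& ~BM_SECT_PER_BIT' clears only the one bit of that weight, while '& ~(BM_SECT_PER_BIT-1)' rounds down to the previous multiple. A worked example with an illustrative BM_SECT_PER_BIT of 8 (it is a power of two):

	15 & ~8		/* == 7: only bit 3 cleared, result not 8-aligned	    */
	15 & ~(8 - 1)	/* == 8: low bits cleared, rounded down to a multiple of 8 */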
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index 5496104f90b9..662bc8ef830a 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -167,18 +167,24 @@ static void drbd_syncer_progress(struct drbd_conf *mdev, struct seq_file *seq)
* we convert to sectors in the display below. */
unsigned long bm_bits = drbd_bm_bits(mdev);
unsigned long bit_pos;
+ unsigned long long stop_sector = 0;
if (mdev->state.conn == C_VERIFY_S ||
- mdev->state.conn == C_VERIFY_T)
+ mdev->state.conn == C_VERIFY_T) {
bit_pos = bm_bits - mdev->ov_left;
- else
+ if (mdev->agreed_pro_version >= 97)
+ stop_sector = mdev->ov_stop_sector;
+ } else
bit_pos = mdev->bm_resync_fo;
/* Total sectors may be slightly off for oddly
* sized devices. So what. */
seq_printf(seq,
- "\t%3d%% sector pos: %llu/%llu\n",
+ "\t%3d%% sector pos: %llu/%llu",
(int)(bit_pos / (bm_bits/100+1)),
(unsigned long long)bit_pos * BM_SECT_PER_BIT,
(unsigned long long)bm_bits * BM_SECT_PER_BIT);
+ if (stop_sector != 0 && stop_sector != ULLONG_MAX)
+ seq_printf(seq, " stop sector: %llu", stop_sector);
+ seq_printf(seq, "\n");
}
}
@@ -264,7 +270,7 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
mdev->state.peer_isp ? 'p' : '-',
mdev->state.user_isp ? 'u' : '-',
mdev->congestion_reason ?: '-',
- test_bit(AL_SUSPENDED, &mdev->flags) ? 's' : '-',
+ drbd_test_flag(mdev, AL_SUSPENDED) ? 's' : '-',
mdev->send_cnt/2,
mdev->recv_cnt/2,
mdev->writ_cnt/2,
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index c74ca2df7431..eb0cafea1423 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -516,40 +516,29 @@ static int drbd_recv(struct drbd_conf *mdev, void *buf, size_t size)
oldfs = get_fs();
set_fs(KERNEL_DS);
+ rv = sock_recvmsg(mdev->data.socket, &msg, size, msg.msg_flags);
+ set_fs(oldfs);
- for (;;) {
- rv = sock_recvmsg(mdev->data.socket, &msg, size, msg.msg_flags);
- if (rv == size)
- break;
-
- /* Note:
- * ECONNRESET other side closed the connection
- * ERESTARTSYS (on sock) we got a signal
- */
-
- if (rv < 0) {
- if (rv == -ECONNRESET)
- dev_info(DEV, "sock was reset by peer\n");
- else if (rv != -ERESTARTSYS)
- dev_err(DEV, "sock_recvmsg returned %d\n", rv);
- break;
- } else if (rv == 0) {
- dev_info(DEV, "sock was shut down by peer\n");
- break;
- } else {
- /* signal came in, or peer/link went down,
- * after we read a partial message
- */
- /* D_ASSERT(signal_pending(current)); */
- break;
+ if (rv < 0) {
+ if (rv == -ECONNRESET)
+ dev_info(DEV, "sock was reset by peer\n");
+ else if (rv != -ERESTARTSYS)
+ dev_err(DEV, "sock_recvmsg returned %d\n", rv);
+ } else if (rv == 0) {
+ if (drbd_test_flag(mdev, DISCONNECT_SENT)) {
+ long t; /* time_left */
+ t = wait_event_timeout(mdev->state_wait, mdev->state.conn < C_CONNECTED,
+ mdev->net_conf->ping_timeo * HZ/10);
+ if (t)
+ goto out;
}
- };
-
- set_fs(oldfs);
+ dev_info(DEV, "sock was shut down by peer\n");
+ }
if (rv != size)
drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));
+out:
return rv;
}
@@ -760,11 +749,10 @@ static int drbd_connect(struct drbd_conf *mdev)
D_ASSERT(!mdev->data.socket);
+ drbd_clear_flag(mdev, DISCONNECT_SENT);
if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)
return -2;
- clear_bit(DISCARD_CONCURRENT, &mdev->flags);
-
sock = NULL;
msock = NULL;
@@ -784,6 +772,7 @@ static int drbd_connect(struct drbd_conf *mdev)
sock = s;
s = NULL;
} else if (!msock) {
+ drbd_clear_flag(mdev, DISCARD_CONCURRENT);
drbd_send_fp(mdev, s, P_HAND_SHAKE_M);
msock = s;
s = NULL;
@@ -821,7 +810,7 @@ retry:
sock_release(msock);
}
msock = s;
- set_bit(DISCARD_CONCURRENT, &mdev->flags);
+ drbd_set_flag(mdev, DISCARD_CONCURRENT);
break;
default:
dev_warn(DEV, "Error receiving initial packet\n");
@@ -903,18 +892,18 @@ retry:
if (drbd_send_protocol(mdev) == -1)
return -1;
- set_bit(STATE_SENT, &mdev->flags);
+ drbd_set_flag(mdev, STATE_SENT);
drbd_send_sync_param(mdev, &mdev->sync_conf);
drbd_send_sizes(mdev, 0, 0);
drbd_send_uuids(mdev);
drbd_send_current_state(mdev);
- clear_bit(USE_DEGR_WFC_T, &mdev->flags);
- clear_bit(RESIZE_PENDING, &mdev->flags);
+ drbd_clear_flag(mdev, USE_DEGR_WFC_T);
+ drbd_clear_flag(mdev, RESIZE_PENDING);
spin_lock_irq(&mdev->req_lock);
rv = _drbd_set_state(_NS(mdev, conn, C_WF_REPORT_PARAMS), CS_VERBOSE, NULL);
if (mdev->state.conn != C_WF_REPORT_PARAMS)
- clear_bit(STATE_SENT, &mdev->flags);
+ drbd_clear_flag(mdev, STATE_SENT);
spin_unlock_irq(&mdev->req_lock);
if (rv < SS_SUCCESS)
@@ -968,7 +957,7 @@ static void drbd_flush(struct drbd_conf *mdev)
int rv;
if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
- rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_KERNEL,
+ rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_NOIO,
NULL);
if (rv) {
dev_info(DEV, "local disk flush failed with status %d\n", rv);
@@ -1743,7 +1732,7 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
/* don't get the req_lock yet,
* we may sleep in drbd_wait_peer_seq */
const int size = e->size;
- const int discard = test_bit(DISCARD_CONCURRENT, &mdev->flags);
+ const int discard = drbd_test_flag(mdev, DISCARD_CONCURRENT);
DEFINE_WAIT(wait);
struct drbd_request *i;
struct hlist_node *n;
@@ -2211,7 +2200,7 @@ static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
"Using discard-least-changes instead\n");
case ASB_DISCARD_ZERO_CHG:
if (ch_peer == 0 && ch_self == 0) {
- rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
+ rv = drbd_test_flag(mdev, DISCARD_CONCURRENT)
? -1 : 1;
break;
} else {
@@ -2227,7 +2216,7 @@ static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
rv = 1;
else /* ( ch_self == ch_peer ) */
/* Well, then use something else. */
- rv = test_bit(DISCARD_CONCURRENT, &mdev->flags)
+ rv = drbd_test_flag(mdev, DISCARD_CONCURRENT)
? -1 : 1;
break;
case ASB_DISCARD_LOCAL:
@@ -2392,7 +2381,9 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
(mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
- drbd_uuid_set_bm(mdev, 0UL);
+ drbd_uuid_move_history(mdev);
+ mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
+ mdev->ldev->md.uuid[UI_BITMAP] = 0;
drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
@@ -2429,7 +2420,7 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
}
/* Common power [off|failure] */
- rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
+ rct = (drbd_test_flag(mdev, CRASHED_PRIMARY) ? 1 : 0) +
(mdev->p_uuid[UI_FLAGS] & 2);
/* lowest bit is set when we were primary,
* next bit (weight 2) is set when peer was primary */
@@ -2440,7 +2431,7 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
case 1: /* self_pri && !peer_pri */ return 1;
case 2: /* !self_pri && peer_pri */ return -1;
case 3: /* self_pri && peer_pri */
- dc = test_bit(DISCARD_CONCURRENT, &mdev->flags);
+ dc = drbd_test_flag(mdev, DISCARD_CONCURRENT);
return dc ? -1 : 1;
}
}
@@ -2500,8 +2491,8 @@ static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(l
if (mdev->agreed_pro_version < 91)
return -1091;
- _drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
- _drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
+ __drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
+ __drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
dev_info(DEV, "Last syncUUID did not get through, corrected:\n");
drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
@@ -2554,11 +2545,14 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
mydisk = mdev->new_state_tmp.disk;
dev_info(DEV, "drbd_sync_handshake:\n");
+
+ spin_lock_irq(&mdev->ldev->md.uuid_lock);
drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
hg = drbd_uuid_compare(mdev, &rule_nr);
+ spin_unlock_irq(&mdev->ldev->md.uuid_lock);
dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
@@ -2654,7 +2648,7 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_rol
}
}
- if (mdev->net_conf->dry_run || test_bit(CONN_DRY_RUN, &mdev->flags)) {
+ if (mdev->net_conf->dry_run || drbd_test_flag(mdev, CONN_DRY_RUN)) {
if (hg == 0)
dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
else
@@ -2722,10 +2716,10 @@ static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsig
cf = be32_to_cpu(p->conn_flags);
p_want_lose = cf & CF_WANT_LOSE;
- clear_bit(CONN_DRY_RUN, &mdev->flags);
+ drbd_clear_flag(mdev, CONN_DRY_RUN);
if (cf & CF_DRY_RUN)
- set_bit(CONN_DRY_RUN, &mdev->flags);
+ drbd_set_flag(mdev, CONN_DRY_RUN);
if (p_proto != mdev->net_conf->wire_protocol) {
dev_err(DEV, "incompatible communication protocols\n");
@@ -2913,7 +2907,7 @@ static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
fifo_size = (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
- rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
+ rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_NOIO);
if (!rs_plan_s) {
dev_err(DEV, "kmalloc of fifo_buffer failed");
goto disconnect;
@@ -3057,7 +3051,7 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
* needs to know my new size... */
drbd_send_sizes(mdev, 0, ddsf);
}
- if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
+ if (drbd_test_and_clear_flag(mdev, RESIZE_PENDING) ||
(dd == grew && mdev->state.conn == C_CONNECTED)) {
if (mdev->state.pdsk >= D_INCONSISTENT &&
mdev->state.disk >= D_INCONSISTENT) {
@@ -3066,7 +3060,7 @@ static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
else
resync_after_online_grow(mdev);
} else
- set_bit(RESYNC_AFTER_NEG, &mdev->flags);
+ drbd_set_flag(mdev, RESYNC_AFTER_NEG);
}
}
@@ -3127,7 +3121,7 @@ static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
ongoing cluster wide state change is finished. That is important if
we are primary and are detaching from our disk. We need to see the
new disk state... */
- wait_event(mdev->misc_wait, !test_bit(CLUSTER_ST_CHANGE, &mdev->flags));
+ wait_event(mdev->misc_wait, !drbd_test_flag(mdev, CLUSTER_ST_CHANGE));
if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
@@ -3176,8 +3170,8 @@ static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsi
mask.i = be32_to_cpu(p->mask);
val.i = be32_to_cpu(p->val);
- if (test_bit(DISCARD_CONCURRENT, &mdev->flags) &&
- test_bit(CLUSTER_ST_CHANGE, &mdev->flags)) {
+ if (drbd_test_flag(mdev, DISCARD_CONCURRENT) &&
+ drbd_test_flag(mdev, CLUSTER_ST_CHANGE)) {
drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
return true;
}
@@ -3250,6 +3244,14 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
}
}
+ /* explicit verify finished notification, stop sector reached. */
+ if (os.conn == C_VERIFY_T && os.disk == D_UP_TO_DATE &&
+ peer_state.conn == C_CONNECTED && real_peer_disk == D_UP_TO_DATE) {
+ ov_oos_print(mdev);
+ drbd_resync_finished(mdev);
+ return true;
+ }
+
/* peer says his disk is inconsistent, while we think it is uptodate,
* and this happens while the peer still thinks we have a sync going on,
* but we think we are already done with the sync.
@@ -3278,7 +3280,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
os.disk == D_NEGOTIATING));
/* if we have both been inconsistent, and the peer has been
* forced to be UpToDate with --overwrite-data */
- cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
+ cr |= drbd_test_flag(mdev, CONSIDER_RESYNC);
/* if we had been plain connected, and the admin requested to
* start a sync by "invalidate" or "invalidate-remote" */
cr |= (os.conn == C_CONNECTED &&
@@ -3298,7 +3300,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
peer_state.disk = D_DISKLESS;
real_peer_disk = D_DISKLESS;
} else {
- if (test_and_clear_bit(CONN_DRY_RUN, &mdev->flags))
+ if (drbd_test_and_clear_flag(mdev, CONN_DRY_RUN))
return false;
D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
@@ -3310,7 +3312,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
spin_lock_irq(&mdev->req_lock);
if (mdev->state.i != os.i)
goto retry;
- clear_bit(CONSIDER_RESYNC, &mdev->flags);
+ drbd_clear_flag(mdev, CONSIDER_RESYNC);
ns.peer = peer_state.role;
ns.pdsk = real_peer_disk;
ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
@@ -3318,14 +3320,14 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
ns.disk = mdev->new_state_tmp.disk;
cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
if (ns.pdsk == D_CONSISTENT && is_susp(ns) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
- test_bit(NEW_CUR_UUID, &mdev->flags)) {
+ drbd_test_flag(mdev, NEW_CUR_UUID)) {
/* Do not allow tl_restart(resend) for a rebooted peer. We can only allow this
for temporary network outages! */
spin_unlock_irq(&mdev->req_lock);
dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
tl_clear(mdev);
drbd_uuid_new_current(mdev);
- clear_bit(NEW_CUR_UUID, &mdev->flags);
+ drbd_clear_flag(mdev, NEW_CUR_UUID);
drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0));
return false;
}
@@ -3929,7 +3931,7 @@ static void drbd_disconnect(struct drbd_conf *mdev)
/* serialize with bitmap writeout triggered by the state change,
* if any. */
- wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
+ wait_event(mdev->misc_wait, !drbd_test_flag(mdev, BITMAP_IO));
/* tcp_close and release of sendpage pages can be deferred. I don't
* want to use SO_LINGER, because apparently it can be deferred for
@@ -4265,9 +4267,9 @@ static int got_RqSReply(struct drbd_conf *mdev, struct p_header80 *h)
int retcode = be32_to_cpu(p->retcode);
if (retcode >= SS_SUCCESS) {
- set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
+ drbd_set_flag(mdev, CL_ST_CHG_SUCCESS);
} else {
- set_bit(CL_ST_CHG_FAIL, &mdev->flags);
+ drbd_set_flag(mdev, CL_ST_CHG_FAIL);
dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
drbd_set_st_err_str(retcode), retcode);
}
@@ -4286,7 +4288,7 @@ static int got_PingAck(struct drbd_conf *mdev, struct p_header80 *h)
{
/* restore idle timeout */
mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
- if (!test_and_set_bit(GOT_PING_ACK, &mdev->flags))
+ if (!drbd_test_and_set_flag(mdev, GOT_PING_ACK))
wake_up(&mdev->misc_wait);
return true;
@@ -4502,7 +4504,7 @@ static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h)
if (mdev->state.conn == C_AHEAD &&
atomic_read(&mdev->ap_in_flight) == 0 &&
- !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags)) {
+ !drbd_test_and_set_flag(mdev, AHEAD_TO_SYNC_SOURCE)) {
mdev->start_resync_timer.expires = jiffies + HZ;
add_timer(&mdev->start_resync_timer);
}
@@ -4612,7 +4614,7 @@ int drbd_asender(struct drbd_thread *thi)
while (get_t_state(thi) == Running) {
drbd_thread_current_set_cpu(mdev);
- if (test_and_clear_bit(SEND_PING, &mdev->flags)) {
+ if (drbd_test_and_clear_flag(mdev, SEND_PING)) {
ERR_IF(!drbd_send_ping(mdev)) goto reconnect;
mdev->meta.socket->sk->sk_rcvtimeo =
mdev->net_conf->ping_timeo*HZ/10;
@@ -4625,12 +4627,12 @@ int drbd_asender(struct drbd_thread *thi)
3 < atomic_read(&mdev->unacked_cnt))
drbd_tcp_cork(mdev->meta.socket);
while (1) {
- clear_bit(SIGNAL_ASENDER, &mdev->flags);
+ drbd_clear_flag(mdev, SIGNAL_ASENDER);
flush_signals(current);
if (!drbd_process_done_ee(mdev))
goto reconnect;
/* to avoid race with newly queued ACKs */
- set_bit(SIGNAL_ASENDER, &mdev->flags);
+ drbd_set_flag(mdev, SIGNAL_ASENDER);
spin_lock_irq(&mdev->req_lock);
empty = list_empty(&mdev->done_ee);
spin_unlock_irq(&mdev->req_lock);
@@ -4650,7 +4652,7 @@ int drbd_asender(struct drbd_thread *thi)
rv = drbd_recv_short(mdev, mdev->meta.socket,
buf, expect-received, 0);
- clear_bit(SIGNAL_ASENDER, &mdev->flags);
+ drbd_clear_flag(mdev, SIGNAL_ASENDER);
flush_signals(current);
@@ -4668,6 +4670,13 @@ int drbd_asender(struct drbd_thread *thi)
received += rv;
buf += rv;
} else if (rv == 0) {
+ if (drbd_test_flag(mdev, DISCONNECT_SENT)) {
+ long t; /* time_left */
+ t = wait_event_timeout(mdev->state_wait, mdev->state.conn < C_CONNECTED,
+ mdev->net_conf->ping_timeo * HZ/10);
+ if (t)
+ break;
+ }
dev_err(DEV, "meta connection shut down by peer.\n");
goto reconnect;
} else if (rv == -EAGAIN) {
@@ -4680,7 +4689,7 @@ int drbd_asender(struct drbd_thread *thi)
dev_err(DEV, "PingAck did not arrive in time.\n");
goto reconnect;
}
- set_bit(SEND_PING, &mdev->flags);
+ drbd_set_flag(mdev, SEND_PING);
continue;
} else if (rv == -EINTR) {
continue;
@@ -4738,7 +4747,7 @@ disconnect:
drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
drbd_md_sync(mdev);
}
- clear_bit(SIGNAL_ASENDER, &mdev->flags);
+ drbd_clear_flag(mdev, SIGNAL_ASENDER);
D_ASSERT(mdev->state.conn < C_CONNECTED);
dev_info(DEV, "asender terminated\n");
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 01b2ac641c7b..135ea76ed502 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -118,7 +118,7 @@ static void queue_barrier(struct drbd_conf *mdev)
* barrier/epoch object is added. This is the only place this bit is
* set. It indicates that the barrier for this epoch is already queued,
* and no new epoch has been created yet. */
- if (test_bit(CREATE_BARRIER, &mdev->flags))
+ if (drbd_test_flag(mdev, CREATE_BARRIER))
return;
b = mdev->newest_tle;
@@ -129,7 +129,7 @@ static void queue_barrier(struct drbd_conf *mdev)
* or (on connection loss) in tl_clear. */
inc_ap_pending(mdev);
drbd_queue_work(&mdev->data.work, &b->w);
- set_bit(CREATE_BARRIER, &mdev->flags);
+ drbd_set_flag(mdev, CREATE_BARRIER);
}
static void _about_to_complete_local_write(struct drbd_conf *mdev,
@@ -387,6 +387,20 @@ out_conflict:
return 1;
}
+static void drbd_report_io_error(struct drbd_conf *mdev, struct drbd_request *req)
+{
+ char b[BDEVNAME_SIZE];
+
+ if (!__ratelimit(&drbd_ratelimit_state))
+ return;
+
+ dev_warn(DEV, "local %s IO error sector %llu+%u on %s\n",
+ (req->rq_state & RQ_WRITE) ? "WRITE" : "READ",
+ (unsigned long long)req->sector,
+ req->size >> 9,
+ bdevname(mdev->ldev->backing_bdev, b));
+}
+
/* obviously this could be coded as many single functions
* instead of one huge switch,
* or by putting the code directly in the respective locations
@@ -455,7 +469,8 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
req->rq_state |= RQ_LOCAL_COMPLETED;
req->rq_state &= ~RQ_LOCAL_PENDING;
- __drbd_chk_io_error(mdev, DRBD_IO_ERROR);
+ drbd_report_io_error(mdev, req);
+ __drbd_chk_io_error(mdev, DRBD_WRITE_ERROR);
_req_may_be_done_not_susp(req, m);
break;
@@ -477,7 +492,8 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
break;
}
- __drbd_chk_io_error(mdev, DRBD_IO_ERROR);
+ drbd_report_io_error(mdev, req);
+ __drbd_chk_io_error(mdev, DRBD_READ_ERROR);
goto_queue_for_net_read:
@@ -507,7 +523,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
* corresponding hlist_del is in _req_may_be_done() */
hlist_add_head(&req->collision, ar_hash_slot(mdev, req->sector));
- set_bit(UNPLUG_REMOTE, &mdev->flags);
+ drbd_set_flag(mdev, UNPLUG_REMOTE);
D_ASSERT(req->rq_state & RQ_NET_PENDING);
req->rq_state |= RQ_NET_QUEUED;
@@ -541,11 +557,11 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
/* otherwise we may lose an unplug, which may cause some remote
* io-scheduler timeout to expire, increasing maximum latency,
* hurting performance. */
- set_bit(UNPLUG_REMOTE, &mdev->flags);
+ drbd_set_flag(mdev, UNPLUG_REMOTE);
/* see drbd_make_request_common,
* just after it grabs the req_lock */
- D_ASSERT(test_bit(CREATE_BARRIER, &mdev->flags) == 0);
+ D_ASSERT(drbd_test_flag(mdev, CREATE_BARRIER) == 0);
req->epoch = mdev->newest_tle->br_number;
@@ -888,7 +904,7 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns
* Empty flushes don't need to go into the activity log, they can only
* flush data for pending writes which are already in there. */
if (rw == WRITE && local && size
- && !test_bit(AL_SUSPENDED, &mdev->flags)) {
+ && !drbd_test_flag(mdev, AL_SUSPENDED)) {
req->rq_state |= RQ_IN_ACT_LOG;
drbd_al_begin_io(mdev, sector);
}
@@ -900,7 +916,8 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns
if (!(local || remote) && !is_susp(mdev->state)) {
if (__ratelimit(&drbd_ratelimit_state))
- dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
+ dev_err(DEV, "IO ERROR: neither local nor remote data, sector %llu+%u\n",
+ (unsigned long long)req->sector, req->size >> 9);
goto fail_free_complete;
}
@@ -912,7 +929,7 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns
* if we lost that race, we retry. */
if (rw == WRITE && (remote || send_oos) &&
mdev->unused_spare_tle == NULL &&
- test_bit(CREATE_BARRIER, &mdev->flags)) {
+ drbd_test_flag(mdev, CREATE_BARRIER)) {
allocate_barrier:
b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_NOIO);
if (!b) {
@@ -955,7 +972,7 @@ allocate_barrier:
}
if (rw == WRITE && (remote || send_oos) &&
mdev->unused_spare_tle == NULL &&
- test_bit(CREATE_BARRIER, &mdev->flags)) {
+ drbd_test_flag(mdev, CREATE_BARRIER)) {
/* someone closed the current epoch
* while we were grabbing the spinlock */
spin_unlock_irq(&mdev->req_lock);
@@ -977,12 +994,12 @@ allocate_barrier:
* make sure that, if this is a write request and it triggered a
* barrier packet, this request is queued within the same spinlock. */
if ((remote || send_oos) && mdev->unused_spare_tle &&
- test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
+ drbd_test_and_clear_flag(mdev, CREATE_BARRIER)) {
_tl_add_barrier(mdev, mdev->unused_spare_tle);
mdev->unused_spare_tle = NULL;
} else {
D_ASSERT(!(remote && rw == WRITE &&
- test_bit(CREATE_BARRIER, &mdev->flags)));
+ drbd_test_flag(mdev, CREATE_BARRIER)));
}
/* NOTE
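[Editor's note] drbd_report_io_error() above depends on the __ratelimit() convention: the function returns nonzero when printing is still allowed, so the early-return guard must be negated (corrected above). A self-contained sketch of the pattern, with hypothetical names:

	#include <linux/ratelimit.h>
	#include <linux/printk.h>

	static void report_something(void)
	{
		/* allow at most 10 messages per 5 seconds */
		static DEFINE_RATELIMIT_STATE(rs, 5 * HZ, 10);

		if (!__ratelimit(&rs))
			return;	/* over budget: stay silent */

		pr_warn("something noteworthy happened\n");
	}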
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 6bce2cc179d4..7cd32e73b016 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -111,7 +111,7 @@ void drbd_endio_read_sec_final(struct drbd_epoch_entry *e) __releases(local)
if (list_empty(&mdev->read_ee))
wake_up(&mdev->ee_wait);
if (test_bit(__EE_WAS_ERROR, &e->flags))
- __drbd_chk_io_error(mdev, DRBD_IO_ERROR);
+ __drbd_chk_io_error(mdev, DRBD_READ_ERROR);
spin_unlock_irqrestore(&mdev->req_lock, flags);
drbd_queue_work(&mdev->data.work, &e->w);
@@ -154,7 +154,7 @@ static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(lo
: list_empty(&mdev->active_ee);
if (test_bit(__EE_WAS_ERROR, &e->flags))
- __drbd_chk_io_error(mdev, DRBD_IO_ERROR);
+ __drbd_chk_io_error(mdev, DRBD_WRITE_ERROR);
spin_unlock_irqrestore(&mdev->req_lock, flags);
if (is_syncer_req)
@@ -227,6 +227,42 @@ void drbd_endio_pri(struct bio *bio, int error)
error = -EIO;
}
+ /* If this request was aborted locally before,
+ * but now was completed "successfully",
+ * chances are that this caused arbitrary data corruption.
+ *
+ * "aborting" requests, or force-detaching the disk, is intended for
+ * completely blocked/hung local backing devices which no longer
+ * complete requests at all, not even error completions. In this
+ * situation, usually a hard-reset and failover is the only way out.
+ *
+ * By "aborting", basically faking a local error-completion,
+ * we allow for a more graceful switchover by cleanly migrating services.
+ * Still the affected node has to be rebooted "soon".
+ *
+ * By completing these requests, we allow the upper layers to re-use
+ * the associated data pages.
+ *
+ * If later the local backing device "recovers", and now DMAs some data
+ * from disk into the original request pages, in the best case it will
+ * just put random data into unused pages; but typically it will corrupt
+ * meanwhile completely unrelated data, causing all sorts of damage.
+ *
+ * Which means delayed successful completion,
+ * especially for READ requests,
+ * is a reason to panic().
+ *
+ * We assume that a delayed *error* completion is OK,
+ * though we still will complain noisily about it.
+ */
+ if (unlikely(req->rq_state & RQ_LOCAL_ABORTED)) {
+ if (__ratelimit(&drbd_ratelimit_state))
+ dev_emerg(DEV, "delayed completion of aborted local request; disk-timeout may be too aggressive\n");
+
+ if (!error)
+ panic("possible random memory corruption caused by delayed completion of aborted local request\n");
+ }
+
/* to avoid recursion in __req_mod */
if (unlikely(error)) {
what = (bio_data_dir(bio) == WRITE)
@@ -691,6 +727,7 @@ static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int ca
int number, i, size;
sector_t sector;
const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
+ bool stop_sector_reached = false;
if (unlikely(cancel))
return 1;
@@ -699,9 +736,17 @@ static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int ca
sector = mdev->ov_position;
for (i = 0; i < number; i++) {
- if (sector >= capacity) {
+ if (sector >= capacity)
return 1;
- }
+
+ /* We check for "finished" only in the reply path:
+ * w_e_end_ov_reply().
+ * We need to send at least one request out. */
+ stop_sector_reached = i > 0
+ && mdev->agreed_pro_version >= 97
+ && sector >= mdev->ov_stop_sector;
+ if (stop_sector_reached)
+ break;
size = BM_BLOCK_SIZE;
@@ -725,7 +770,8 @@ static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int ca
requeue:
mdev->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
- mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME);
+ if (i == 0 || !stop_sector_reached)
+ mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME);
return 1;
}
@@ -747,7 +793,7 @@ int w_start_resync(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
}
drbd_start_resync(mdev, C_SYNC_SOURCE);
- clear_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags);
+ drbd_clear_flag(mdev, AHEAD_TO_SYNC_SOURCE);
return 1;
}
@@ -771,10 +817,10 @@ static int w_resync_finished(struct drbd_conf *mdev, struct drbd_work *w, int ca
static void ping_peer(struct drbd_conf *mdev)
{
- clear_bit(GOT_PING_ACK, &mdev->flags);
+ drbd_clear_flag(mdev, GOT_PING_ACK);
request_ping(mdev);
wait_event(mdev->misc_wait,
- test_bit(GOT_PING_ACK, &mdev->flags) || mdev->state.conn < C_CONNECTED);
+ drbd_test_flag(mdev, GOT_PING_ACK) || mdev->state.conn < C_CONNECTED);
}
int drbd_resync_finished(struct drbd_conf *mdev)
@@ -808,7 +854,12 @@ int drbd_resync_finished(struct drbd_conf *mdev)
dt = (jiffies - mdev->rs_start - mdev->rs_paused) / HZ;
if (dt <= 0)
dt = 1;
+
db = mdev->rs_total;
+ /* adjust for verify start and stop sectors, and the position actually reached */
+ if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
+ db -= mdev->ov_left;
+
dbdt = Bit2KB(db/dt);
mdev->rs_paused /= HZ;
@@ -831,7 +882,7 @@ int drbd_resync_finished(struct drbd_conf *mdev)
ns.conn = C_CONNECTED;
dev_info(DEV, "%s done (total %lu sec; paused %lu sec; %lu K/sec)\n",
- verify_done ? "Online verify " : "Resync",
+ verify_done ? "Online verify" : "Resync",
dt + mdev->rs_paused, mdev->rs_paused, dbdt);
n_oos = drbd_bm_total_weight(mdev);
@@ -912,7 +963,9 @@ out:
mdev->rs_total = 0;
mdev->rs_failed = 0;
mdev->rs_paused = 0;
- if (verify_done)
+
+ /* reset start sector if we reached the end of the device */
+ if (verify_done && mdev->ov_left == 0)
mdev->ov_start_sector = 0;
drbd_md_sync(mdev);
@@ -1158,6 +1211,7 @@ int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
unsigned int size = e->size;
int digest_size;
int ok, eq = 0;
+ bool stop_sector_reached = false;
if (unlikely(cancel)) {
drbd_free_ee(mdev, e);
@@ -1208,7 +1262,10 @@ int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
if ((mdev->ov_left & 0x200) == 0x200)
drbd_advance_rs_marks(mdev, mdev->ov_left);
- if (mdev->ov_left == 0) {
+ stop_sector_reached = mdev->agreed_pro_version >= 97 &&
+ (sector + (size>>9)) >= mdev->ov_stop_sector;
+
+ if (mdev->ov_left == 0 || stop_sector_reached) {
ov_oos_print(mdev);
drbd_resync_finished(mdev);
}
@@ -1692,8 +1749,8 @@ int drbd_worker(struct drbd_thread *thi)
NS(conn, C_NETWORK_FAILURE));
}
}
- D_ASSERT(test_bit(DEVICE_DYING, &mdev->flags));
- D_ASSERT(test_bit(CONFIG_PENDING, &mdev->flags));
+ D_ASSERT(drbd_test_flag(mdev, DEVICE_DYING));
+ D_ASSERT(drbd_test_flag(mdev, CONFIG_PENDING));
spin_lock_irq(&mdev->data.work.q_lock);
i = 0;
@@ -1726,8 +1783,8 @@ int drbd_worker(struct drbd_thread *thi)
dev_info(DEV, "worker terminated\n");
- clear_bit(DEVICE_DYING, &mdev->flags);
- clear_bit(CONFIG_PENDING, &mdev->flags);
+ drbd_clear_flag(mdev, DEVICE_DYING);
+ drbd_clear_flag(mdev, CONFIG_PENDING);
wake_up(&mdev->state_wait);
return 0;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 3bba65510d23..54046e51160a 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -976,8 +976,21 @@ static int loop_clr_fd(struct loop_device *lo)
if (lo->lo_state != Lo_bound)
return -ENXIO;
- if (lo->lo_refcnt > 1) /* we needed one fd for the ioctl */
- return -EBUSY;
+ /*
+ * If we've explicitly asked to tear down the loop device,
+ * and it has an elevated reference count, set it for auto-teardown when
+ * the last reference goes away. This stops $!~#$@ udev from
+ * preventing teardown because it decided that it needs to run blkid on
+ * the loopback device whenever one appears. xfstests is notorious for
+ * failing tests because blkid via udev races with a "losetup <dev>;
+ * do something like mkfs; losetup -d <dev>" sequence, causing the losetup -d
+ * command to fail with EBUSY.
+ */
+ if (lo->lo_refcnt > 1) {
+ lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
+ mutex_unlock(&lo->lo_ctl_mutex);
+ return 0;
+ }
if (filp == NULL)
return -EINVAL;
@@ -1038,10 +1051,10 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
{
int err;
struct loop_func_table *xfer;
- uid_t uid = current_uid();
+ kuid_t uid = current_uid();
if (lo->lo_encrypt_key_size &&
- lo->lo_key_owner != uid &&
+ !uid_eq(lo->lo_key_owner, uid) &&
!capable(CAP_SYS_ADMIN))
return -EPERM;
if (lo->lo_state != Lo_bound)
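[Editor's note] The loop_set_status() hunk above is part of the user-namespace conversion: uid_t becomes the opaque kuid_t, which must be compared with uid_eq() rather than ==. A minimal sketch of the pattern (may_touch() is a hypothetical name):

	#include <linux/uidgid.h>
	#include <linux/cred.h>
	#include <linux/capability.h>

	static bool may_touch(kuid_t owner)
	{
		kuid_t uid = current_uid();	/* kuid_t after the conversion */

		/* same owner, or privileged */
		return uid_eq(owner, uid) || capable(CAP_SYS_ADMIN);
	}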
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index a8fddeb3d638..adc6f36564cf 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -1148,11 +1148,15 @@ static bool mtip_pause_ncq(struct mtip_port *port,
reply = port->rxfis + RX_FIS_D2H_REG;
task_file_data = readl(port->mmio+PORT_TFDATA);
- if ((task_file_data & 1) || (fis->command == ATA_CMD_SEC_ERASE_UNIT))
+ if (fis->command == ATA_CMD_SEC_ERASE_UNIT)
+ clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
+
+ if ((task_file_data & 1))
return false;
if (fis->command == ATA_CMD_SEC_ERASE_PREP) {
set_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
+ set_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag);
port->ic_pause_timer = jiffies;
return true;
} else if ((fis->command == ATA_CMD_DOWNLOAD_MICRO) &&
@@ -1900,7 +1904,7 @@ static int exec_drive_command(struct mtip_port *port, u8 *command,
int rv = 0, xfer_sz = command[3];
if (xfer_sz) {
- if (user_buffer)
+ if (!user_buffer)
return -EFAULT;
buf = dmam_alloc_coherent(&port->dd->pdev->dev,
@@ -2031,8 +2035,9 @@ static unsigned int implicit_sector(unsigned char command,
}
return rv;
}
-
-static void mtip_set_timeout(struct host_to_dev_fis *fis, unsigned int *timeout)
+static void mtip_set_timeout(struct driver_data *dd,
+ struct host_to_dev_fis *fis,
+ unsigned int *timeout, u8 erasemode)
{
switch (fis->command) {
case ATA_CMD_DOWNLOAD_MICRO:
@@ -2040,10 +2045,13 @@ static void mtip_set_timeout(struct host_to_dev_fis *fis, unsigned int *timeout)
break;
case ATA_CMD_SEC_ERASE_UNIT:
case 0xFC:
- *timeout = 240000; /* 4 minutes */
+ if (erasemode)
+ *timeout = ((*(dd->port->identify + 90) * 2) * 60000);
+ else
+ *timeout = ((*(dd->port->identify + 89) * 2) * 60000);
break;
case ATA_CMD_STANDBYNOW1:
- *timeout = 10000; /* 10 seconds */
+ *timeout = 120000; /* 2 minutes */
break;
case 0xF7:
case 0xFA:
@@ -2083,6 +2091,7 @@ static int exec_drive_taskfile(struct driver_data *dd,
unsigned int transfer_size;
unsigned long task_file_data;
int intotal = outtotal + req_task->out_size;
+ int erasemode = 0;
taskout = req_task->out_size;
taskin = req_task->in_size;
@@ -2208,7 +2217,13 @@ static int exec_drive_taskfile(struct driver_data *dd,
fis.lba_hi,
fis.device);
- mtip_set_timeout(&fis, &timeout);
+ /* check for erase mode support during secure erase. */
+ if ((fis.command == ATA_CMD_SEC_ERASE_UNIT)
+ && (outbuf[0] & MTIP_SEC_ERASE_MODE)) {
+ erasemode = 1;
+ }
+
+ mtip_set_timeout(dd, &fis, &timeout, erasemode);
/* Determine the correct transfer size.*/
if (force_single_sector)
@@ -2588,9 +2603,6 @@ static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf,
if (!len || size)
return 0;
- if (size < 0)
- return -EINVAL;
-
size += sprintf(&buf[size], "H/ S ACTive : [ 0x");
for (n = dd->slot_groups-1; n >= 0; n--)
@@ -2660,9 +2672,6 @@ static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf,
if (!len || size)
return 0;
- if (size < 0)
- return -EINVAL;
-
size += sprintf(&buf[size], "Flag-port : [ %08lX ]\n",
dd->port->flags);
size += sprintf(&buf[size], "Flag-dd : [ %08lX ]\n",
@@ -3214,8 +3223,8 @@ static int mtip_hw_init(struct driver_data *dd)
"Unable to check write protect progress\n");
else
dev_info(&dd->pdev->dev,
- "Write protect progress: %d%% (%d blocks)\n",
- attr242.cur, attr242.data);
+ "Write protect progress: %u%% (%u blocks)\n",
+ attr242.cur, le32_to_cpu(attr242.data));
return rv;
out3:
@@ -3619,6 +3628,10 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
bio_endio(bio, -ENODATA);
return;
}
+ if (unlikely(test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag))) {
+ bio_endio(bio, -ENODATA);
+ return;
+ }
}
if (unlikely(!bio_has_data(bio))) {
@@ -4168,7 +4181,13 @@ static void mtip_pci_shutdown(struct pci_dev *pdev)
/* Table of device ids supported by this driver. */
static DEFINE_PCI_DEVICE_TABLE(mtip_pci_tbl) = {
- { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320_DEVICE_ID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320H_DEVICE_ID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320M_DEVICE_ID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320S_DEVICE_ID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P325M_DEVICE_ID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P420H_DEVICE_ID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P420M_DEVICE_ID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P425M_DEVICE_ID) },
{ 0 }
};
@@ -4199,12 +4218,12 @@ static int __init mtip_init(void)
{
int error;
- printk(KERN_INFO MTIP_DRV_NAME " Version " MTIP_DRV_VERSION "\n");
+ pr_info(MTIP_DRV_NAME " Version " MTIP_DRV_VERSION "\n");
/* Allocate a major block device number to use with this driver. */
error = register_blkdev(0, MTIP_DRV_NAME);
if (error <= 0) {
- printk(KERN_ERR "Unable to register block device (%d)\n",
+ pr_err("Unable to register block device (%d)\n",
error);
return -EBUSY;
}
@@ -4213,7 +4232,7 @@ static int __init mtip_init(void)
if (!dfs_parent) {
dfs_parent = debugfs_create_dir("rssd", NULL);
if (IS_ERR_OR_NULL(dfs_parent)) {
- printk(KERN_WARNING "Error creating debugfs parent\n");
+ pr_warn("Error creating debugfs parent\n");
dfs_parent = NULL;
}
}
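[Editor's note] The secure-erase timeout computed in mtip_set_timeout() above reads a word of the cached IDENTIFY DEVICE data; assuming the usual ATA convention that words 89 (normal) and 90 (enhanced) report the security-erase time in units of 2 minutes, the arithmetic works out as:

	/* identify[89 or 90] is in 2-minute units */
	minutes    = identify_word * 2;
	timeout_ms = minutes * 60000;

	/* e.g. a reported value of 30 gives 30 * 2 * 60000 = 3,600,000 ms,
	 * i.e. a one-hour timeout */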
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index f51fc23d17bb..5f4a917bd8bb 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -33,6 +33,9 @@
/* offset of Device Control register in PCIe extended capabilities space */
#define PCIE_CONFIG_EXT_DEVICE_CONTROL_OFFSET 0x48
+/* check for erase mode support during secure erase */
+#define MTIP_SEC_ERASE_MODE 0x3
+
/* # of times to retry timed out/failed IOs */
#define MTIP_MAX_RETRIES 2
@@ -76,7 +79,13 @@
/* Micron Vendor ID & P320x SSD Device ID */
#define PCI_VENDOR_ID_MICRON 0x1344
-#define P320_DEVICE_ID 0x5150
+#define P320H_DEVICE_ID 0x5150
+#define P320M_DEVICE_ID 0x5151
+#define P320S_DEVICE_ID 0x5152
+#define P325M_DEVICE_ID 0x5153
+#define P420H_DEVICE_ID 0x5160
+#define P420M_DEVICE_ID 0x5161
+#define P425M_DEVICE_ID 0x5163
/* Driver name and version strings */
#define MTIP_DRV_NAME "mtip32xx"
@@ -131,10 +140,12 @@ enum {
MTIP_PF_SVC_THD_STOP_BIT = 8,
/* below are bit numbers in 'dd_flag' defined in driver_data */
+ MTIP_DDF_SEC_LOCK_BIT = 0,
MTIP_DDF_REMOVE_PENDING_BIT = 1,
MTIP_DDF_OVER_TEMP_BIT = 2,
MTIP_DDF_WRITE_PROTECT_BIT = 3,
MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) | \
+ (1 << MTIP_DDF_SEC_LOCK_BIT) | \
(1 << MTIP_DDF_OVER_TEMP_BIT) | \
(1 << MTIP_DDF_WRITE_PROTECT_BIT)),
diff --git a/drivers/block/osdblk.c b/drivers/block/osdblk.c
index 87311ebac0db..1bbc681688e4 100644
--- a/drivers/block/osdblk.c
+++ b/drivers/block/osdblk.c
@@ -266,11 +266,10 @@ static struct bio *bio_chain_clone(struct bio *old_chain, gfp_t gfpmask)
struct bio *tmp, *new_chain = NULL, *tail = NULL;
while (old_chain) {
- tmp = bio_kmalloc(gfpmask, old_chain->bi_max_vecs);
+ tmp = bio_clone_kmalloc(old_chain, gfpmask);
if (!tmp)
goto err_out;
- __bio_clone(tmp, old_chain);
tmp->bi_bdev = NULL;
gfpmask &= ~__GFP_WAIT;
tmp->bi_next = NULL;
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index ba66e4445f41..2e7de7a59bfc 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -522,38 +522,6 @@ static void pkt_bio_finished(struct pktcdvd_device *pd)
}
}
-static void pkt_bio_destructor(struct bio *bio)
-{
- kfree(bio->bi_io_vec);
- kfree(bio);
-}
-
-static struct bio *pkt_bio_alloc(int nr_iovecs)
-{
- struct bio_vec *bvl = NULL;
- struct bio *bio;
-
- bio = kmalloc(sizeof(struct bio), GFP_KERNEL);
- if (!bio)
- goto no_bio;
- bio_init(bio);
-
- bvl = kcalloc(nr_iovecs, sizeof(struct bio_vec), GFP_KERNEL);
- if (!bvl)
- goto no_bvl;
-
- bio->bi_max_vecs = nr_iovecs;
- bio->bi_io_vec = bvl;
- bio->bi_destructor = pkt_bio_destructor;
-
- return bio;
-
- no_bvl:
- kfree(bio);
- no_bio:
- return NULL;
-}
-
/*
* Allocate a packet_data struct
*/
@@ -567,7 +535,7 @@ static struct packet_data *pkt_alloc_packet_data(int frames)
goto no_pkt;
pkt->frames = frames;
- pkt->w_bio = pkt_bio_alloc(frames);
+ pkt->w_bio = bio_kmalloc(GFP_KERNEL, frames);
if (!pkt->w_bio)
goto no_bio;
@@ -581,9 +549,10 @@ static struct packet_data *pkt_alloc_packet_data(int frames)
bio_list_init(&pkt->orig_bios);
for (i = 0; i < frames; i++) {
- struct bio *bio = pkt_bio_alloc(1);
+ struct bio *bio = bio_kmalloc(GFP_KERNEL, 1);
if (!bio)
goto no_rd_bio;
+
pkt->r_bios[i] = bio;
}
@@ -1111,21 +1080,17 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
* Schedule reads for missing parts of the packet.
*/
for (f = 0; f < pkt->frames; f++) {
- struct bio_vec *vec;
-
int p, offset;
+
if (written[f])
continue;
+
bio = pkt->r_bios[f];
- vec = bio->bi_io_vec;
- bio_init(bio);
- bio->bi_max_vecs = 1;
+ bio_reset(bio);
bio->bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
bio->bi_bdev = pd->bdev;
bio->bi_end_io = pkt_end_io_read;
bio->bi_private = pkt;
- bio->bi_io_vec = vec;
- bio->bi_destructor = pkt_bio_destructor;
p = (f * CD_FRAMESIZE) / PAGE_SIZE;
offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
@@ -1418,14 +1383,11 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
}
/* Start the write request */
- bio_init(pkt->w_bio);
- pkt->w_bio->bi_max_vecs = PACKET_MAX_SIZE;
+ bio_reset(pkt->w_bio);
pkt->w_bio->bi_sector = pkt->sector;
pkt->w_bio->bi_bdev = pd->bdev;
pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
pkt->w_bio->bi_private = pkt;
- pkt->w_bio->bi_io_vec = bvec;
- pkt->w_bio->bi_destructor = pkt_bio_destructor;
for (f = 0; f < pkt->frames; f++)
if (!bio_add_page(pkt->w_bio, bvec[f].bv_page, CD_FRAMESIZE, bvec[f].bv_offset))
BUG();
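[Editor's note] The pktcdvd hunks above replace hand-rolled bio reinitialization (saving bi_io_vec, calling bio_init(), restoring fields and a private destructor) with bio_reset(), which clears a bio's state while preserving its allocation so it can be resubmitted. A sketch of reuse under that assumption, with hypothetical names and the pre-3.14 bi_sector field used in this tree:

	#include <linux/bio.h>

	static void reuse_bio(struct bio *bio, struct block_device *bdev,
			      sector_t sector, bio_end_io_t *end_io, void *priv)
	{
		bio_reset(bio);			/* wipe state, keep the allocation */
		bio->bi_sector  = sector;
		bio->bi_bdev    = bdev;
		bio->bi_end_io  = end_io;
		bio->bi_private = priv;
		/* now add pages with bio_add_page() and resubmit */
	}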
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 9ad3b5ec1dc1..9a54623e52d7 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -158,8 +158,8 @@ struct xen_vbd {
struct block_device *bdev;
/* Cached size parameter. */
sector_t size;
- bool flush_support;
- bool discard_secure;
+ unsigned int flush_support:1;
+ unsigned int discard_secure:1;
};
struct backend_info;
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 4f66171c6683..f58434c2617c 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -105,11 +105,10 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid)
{
struct xen_blkif *blkif;
- blkif = kmem_cache_alloc(xen_blkif_cachep, GFP_KERNEL);
+ blkif = kmem_cache_zalloc(xen_blkif_cachep, GFP_KERNEL);
if (!blkif)
return ERR_PTR(-ENOMEM);
- memset(blkif, 0, sizeof(*blkif));
blkif->domid = domid;
spin_lock_init(&blkif->blk_ring_lock);
atomic_set(&blkif->refcnt, 1);
@@ -196,7 +195,7 @@ static void xen_blkif_disconnect(struct xen_blkif *blkif)
}
}
-void xen_blkif_free(struct xen_blkif *blkif)
+static void xen_blkif_free(struct xen_blkif *blkif)
{
if (!atomic_dec_and_test(&blkif->refcnt))
BUG();
@@ -257,7 +256,7 @@ static struct attribute_group xen_vbdstat_group = {
VBD_SHOW(physical_device, "%x:%x\n", be->major, be->minor);
VBD_SHOW(mode, "%s\n", be->mode);
-int xenvbd_sysfs_addif(struct xenbus_device *dev)
+static int xenvbd_sysfs_addif(struct xenbus_device *dev)
{
int error;
@@ -281,7 +280,7 @@ fail1: device_remove_file(&dev->dev, &dev_attr_physical_device);
return error;
}
-void xenvbd_sysfs_delif(struct xenbus_device *dev)
+static void xenvbd_sysfs_delif(struct xenbus_device *dev)
{
sysfs_remove_group(&dev->dev.kobj, &xen_vbdstat_group);
device_remove_file(&dev->dev, &dev_attr_mode);
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index 54a3a6d09819..0bb207eaef2f 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -285,7 +285,7 @@ static long raw_ctl_compat_ioctl(struct file *file, unsigned int cmd,
static const struct file_operations raw_fops = {
.read = do_sync_read,
- .aio_read = generic_file_aio_read,
+ .aio_read = blkdev_aio_read,
.write = do_sync_write,
.aio_write = blkdev_aio_write,
.fsync = blkdev_fsync,
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index b16c8a72a2e2..ba7926f5c099 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -294,7 +294,7 @@ config GPIO_MAX732X_IRQ
config GPIO_MC9S08DZ60
bool "MX35 3DS BOARD MC9S08DZ60 GPIO functions"
- depends on I2C && MACH_MX35_3DS
+ depends on I2C=y && MACH_MX35_3DS
help
Select this to enable the MC9S08DZ60 GPIO driver
diff --git a/drivers/gpio/gpio-em.c b/drivers/gpio/gpio-em.c
index ae37181798b3..ec48ed512628 100644
--- a/drivers/gpio/gpio-em.c
+++ b/drivers/gpio/gpio-em.c
@@ -247,9 +247,9 @@ static int __devinit em_gio_irq_domain_init(struct em_gio_priv *p)
p->irq_base = irq_alloc_descs(pdata->irq_base, 0,
pdata->number_of_pins, numa_node_id());
- if (IS_ERR_VALUE(p->irq_base)) {
+ if (p->irq_base < 0) {
dev_err(&pdev->dev, "cannot get irq_desc\n");
- return -ENXIO;
+ return p->irq_base;
}
pr_debug("gio: hw base = %d, nr = %d, sw base = %d\n",
pdata->gpio_base, pdata->number_of_pins, p->irq_base);
diff --git a/drivers/gpio/gpio-rdc321x.c b/drivers/gpio/gpio-rdc321x.c
index e97016af6443..b62d443e9a59 100644
--- a/drivers/gpio/gpio-rdc321x.c
+++ b/drivers/gpio/gpio-rdc321x.c
@@ -170,6 +170,7 @@ static int __devinit rdc321x_gpio_probe(struct platform_device *pdev)
rdc321x_gpio_dev->reg2_data_base = r->start + 0x4;
rdc321x_gpio_dev->chip.label = "rdc321x-gpio";
+ rdc321x_gpio_dev->chip.owner = THIS_MODULE;
rdc321x_gpio_dev->chip.direction_input = rdc_gpio_direction_input;
rdc321x_gpio_dev->chip.direction_output = rdc_gpio_config;
rdc321x_gpio_dev->chip.get = rdc_gpio_get_value;
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index a18c4aa68b1e..f1a45997aea8 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -82,7 +82,7 @@ int of_get_named_gpio_flags(struct device_node *np, const char *propname,
gpiochip_find(&gg_data, of_gpiochip_find_and_xlate);
of_node_put(gg_data.gpiospec.np);
- pr_debug("%s exited with status %d\n", __func__, ret);
+ pr_debug("%s exited with status %d\n", __func__, gg_data.out_gpio);
return gg_data.out_gpio;
}
EXPORT_SYMBOL(of_get_named_gpio_flags);
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 0f9c146fc00d..4d524b5f52f5 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -439,7 +439,7 @@ static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev)
struct dj_report *dj_report;
int retval;
- dj_report = kzalloc(sizeof(dj_report), GFP_KERNEL);
+ dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL);
if (!dj_report)
return -ENOMEM;
dj_report->report_id = REPORT_ID_DJ_SHORT;
@@ -456,7 +456,7 @@ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
struct dj_report *dj_report;
int retval;
- dj_report = kzalloc(sizeof(dj_report), GFP_KERNEL);
+ dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL);
if (!dj_report)
return -ENOMEM;
dj_report->report_id = REPORT_ID_DJ_SHORT;
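[Editor's note] The two hid-logitech-dj fixes above are the classic sizeof(pointer) vs. sizeof(struct) bug: with a variable named after its type, sizeof(dj_report) silently measures the pointer (4 or 8 bytes) instead of the structure. The sizeof(*ptr) form below avoids the trap altogether:

	struct dj_report *dj_report;

	dj_report = kzalloc(sizeof(dj_report), GFP_KERNEL);	    /* WRONG: pointer size */
	dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL); /* correct */
	dj_report = kzalloc(sizeof(*dj_report), GFP_KERNEL);	    /* correct, rename-proof */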
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 664743d6a6cd..bbf459bca61d 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -798,14 +798,6 @@ static int crypt_convert(struct crypt_config *cc,
return 0;
}
-static void dm_crypt_bio_destructor(struct bio *bio)
-{
- struct dm_crypt_io *io = bio->bi_private;
- struct crypt_config *cc = io->cc;
-
- bio_free(bio, cc->bs);
-}
-
/*
* Generate a new unfragmented bio with the given size
* This should never violate the device limitations
@@ -974,7 +966,6 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
clone->bi_end_io = crypt_endio;
clone->bi_bdev = cc->dev->bdev;
clone->bi_rw = io->base_bio->bi_rw;
- clone->bi_destructor = dm_crypt_bio_destructor;
}
static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
@@ -988,19 +979,14 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
* copy the required bvecs because we need the original
* one in order to decrypt the whole bio data *afterwards*.
*/
- clone = bio_alloc_bioset(gfp, bio_segments(base_bio), cc->bs);
+ clone = bio_clone_bioset(base_bio, gfp, cc->bs);
if (!clone)
return 1;
crypt_inc_pending(io);
clone_init(io, clone);
- clone->bi_idx = 0;
- clone->bi_vcnt = bio_segments(base_bio);
- clone->bi_size = base_bio->bi_size;
clone->bi_sector = cc->start + io->sector;
- memcpy(clone->bi_io_vec, bio_iovec(base_bio),
- sizeof(struct bio_vec) * clone->bi_vcnt);
generic_make_request(clone);
return 0;
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index ea5dd289fe2a..1c46f97d6664 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -249,16 +249,6 @@ static void vm_dp_init(struct dpages *dp, void *data)
dp->context_ptr = data;
}
-static void dm_bio_destructor(struct bio *bio)
-{
- unsigned region;
- struct io *io;
-
- retrieve_io_and_region_from_bio(bio, &io, &region);
-
- bio_free(bio, io->client->bios);
-}
-
/*
* Functions for getting the pages from kernel memory.
*/
@@ -317,7 +307,6 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
bio->bi_sector = where->sector + (where->count - remaining);
bio->bi_bdev = where->bdev;
bio->bi_end_io = endio;
- bio->bi_destructor = dm_bio_destructor;
store_io_and_region_in_bio(bio, io, region);
if (rw & REQ_DISCARD) {
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 4e09b6ff5b49..837879716889 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -86,12 +86,17 @@ struct dm_rq_target_io {
};
/*
- * For request-based dm.
- * One of these is allocated per bio.
+ * For request-based dm - the bio clones we allocate are embedded in these
+ * structs.
+ *
+ * We allocate these with bio_alloc_bioset, using the front_pad parameter when
+ * the bioset is created - this means the bio has to come at the end of the
+ * struct.
*/
struct dm_rq_clone_bio_info {
struct bio *orig;
struct dm_rq_target_io *tio;
+ struct bio clone;
};
union map_info *dm_get_mapinfo(struct bio *bio)
@@ -211,6 +216,11 @@ struct dm_md_mempools {
static struct kmem_cache *_io_cache;
static struct kmem_cache *_tio_cache;
static struct kmem_cache *_rq_tio_cache;
+
+/*
+ * Unused now, and needs to be deleted. But since io_pool is overloaded and it's
+ * still used for _io_cache, I'm leaving this for a later cleanup
+ */
static struct kmem_cache *_rq_bio_info_cache;
static int __init local_init(void)
@@ -467,16 +477,6 @@ static void free_rq_tio(struct dm_rq_target_io *tio)
mempool_free(tio, tio->md->tio_pool);
}
-static struct dm_rq_clone_bio_info *alloc_bio_info(struct mapped_device *md)
-{
- return mempool_alloc(md->io_pool, GFP_ATOMIC);
-}
-
-static void free_bio_info(struct dm_rq_clone_bio_info *info)
-{
- mempool_free(info, info->tio->md->io_pool);
-}
-
static int md_in_flight(struct mapped_device *md)
{
return atomic_read(&md->pending[READ]) +
@@ -681,11 +681,6 @@ static void clone_endio(struct bio *bio, int error)
}
}
- /*
- * Store md for cleanup instead of tio which is about to get freed.
- */
- bio->bi_private = md->bs;
-
free_tio(md, tio);
bio_put(bio);
dec_pending(io, error);
@@ -1032,11 +1027,6 @@ static void __map_bio(struct dm_target *ti, struct bio *clone,
/* error the io and bail out, or requeue it if needed */
md = tio->io->md;
dec_pending(tio->io, r);
- /*
- * Store bio_set for cleanup.
- */
- clone->bi_end_io = NULL;
- clone->bi_private = md->bs;
bio_put(clone);
free_tio(md, tio);
} else if (r) {
@@ -1055,13 +1045,6 @@ struct clone_info {
unsigned short idx;
};
-static void dm_bio_destructor(struct bio *bio)
-{
- struct bio_set *bs = bio->bi_private;
-
- bio_free(bio, bs);
-}
-
/*
* Creates a little bio that just does part of a bvec.
*/
@@ -1073,7 +1056,6 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector,
struct bio_vec *bv = bio->bi_io_vec + idx;
clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
- clone->bi_destructor = dm_bio_destructor;
*clone->bi_io_vec = *bv;
clone->bi_sector = sector;
@@ -1086,7 +1068,7 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector,
clone->bi_flags |= 1 << BIO_CLONED;
if (bio_integrity(bio)) {
- bio_integrity_clone(clone, bio, GFP_NOIO, bs);
+ bio_integrity_clone(clone, bio, GFP_NOIO);
bio_integrity_trim(clone,
bio_sector_offset(bio, idx, offset), len);
}
@@ -1105,7 +1087,6 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
__bio_clone(clone, bio);
- clone->bi_destructor = dm_bio_destructor;
clone->bi_sector = sector;
clone->bi_idx = idx;
clone->bi_vcnt = idx + bv_count;
@@ -1113,7 +1094,7 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
clone->bi_flags &= ~(1 << BIO_SEG_VALID);
if (bio_integrity(bio)) {
- bio_integrity_clone(clone, bio, GFP_NOIO, bs);
+ bio_integrity_clone(clone, bio, GFP_NOIO);
if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
bio_integrity_trim(clone,
@@ -1148,9 +1129,8 @@ static void __issue_target_request(struct clone_info *ci, struct dm_target *ti,
* ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush
* and discard, so no need for concern about wasted bvec allocations.
*/
- clone = bio_alloc_bioset(GFP_NOIO, ci->bio->bi_max_vecs, ci->md->bs);
- __bio_clone(clone, ci->bio);
- clone->bi_destructor = dm_bio_destructor;
+ clone = bio_clone_bioset(ci->bio, GFP_NOIO, ci->md->bs);
+
if (len) {
clone->bi_sector = ci->sector;
clone->bi_size = to_bytes(len);
@@ -1480,30 +1460,17 @@ void dm_dispatch_request(struct request *rq)
}
EXPORT_SYMBOL_GPL(dm_dispatch_request);
-static void dm_rq_bio_destructor(struct bio *bio)
-{
- struct dm_rq_clone_bio_info *info = bio->bi_private;
- struct mapped_device *md = info->tio->md;
-
- free_bio_info(info);
- bio_free(bio, md->bs);
-}
-
static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
void *data)
{
struct dm_rq_target_io *tio = data;
- struct mapped_device *md = tio->md;
- struct dm_rq_clone_bio_info *info = alloc_bio_info(md);
-
- if (!info)
- return -ENOMEM;
+ struct dm_rq_clone_bio_info *info =
+ container_of(bio, struct dm_rq_clone_bio_info, clone);
info->orig = bio_orig;
info->tio = tio;
bio->bi_end_io = end_clone_bio;
bio->bi_private = info;
- bio->bi_destructor = dm_rq_bio_destructor;
return 0;
}
@@ -2738,7 +2705,10 @@ struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity)
if (!pools->tio_pool)
goto free_io_pool_and_out;
- pools->bs = bioset_create(pool_size, 0);
+ pools->bs = (type == DM_TYPE_BIO_BASED) ?
+ bioset_create(pool_size, 0) :
+ bioset_create(pool_size,
+ offsetof(struct dm_rq_clone_bio_info, clone));
if (!pools->bs)
goto free_tio_pool_and_out;
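[Editor's note] The dm_rq_clone_bio_info change above is the front_pad technique the new struct comment describes: the bioset is created with enough padding in front of every bio to hold the per-clone bookkeeping, and container_of() recovers the wrapper from the embedded bio, eliminating a separate allocation that could fail. A minimal sketch under those assumptions (the names are illustrative, not from the patch):

	#include <linux/bio.h>

	struct clone_info_example {
		void		*orig;
		struct bio	clone;	/* must be the last member */
	};

	static struct bio_set *make_bs(unsigned pool_size)
	{
		/* every bio allocated from this set is embedded at
		 * offsetof(struct clone_info_example, clone) */
		return bioset_create(pool_size,
				     offsetof(struct clone_info_example, clone));
	}

	static struct clone_info_example *info_from_bio(struct bio *bio)
	{
		return container_of(bio, struct clone_info_example, clone);
	}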
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 3f6203a4c7ea..7a2b0793f66e 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -155,32 +155,17 @@ static int start_readonly;
* like bio_clone, but with a local bio set
*/
-static void mddev_bio_destructor(struct bio *bio)
-{
- struct mddev *mddev, **mddevp;
-
- mddevp = (void*)bio;
- mddev = mddevp[-1];
-
- bio_free(bio, mddev->bio_set);
-}
-
struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
struct mddev *mddev)
{
struct bio *b;
- struct mddev **mddevp;
if (!mddev || !mddev->bio_set)
return bio_alloc(gfp_mask, nr_iovecs);
- b = bio_alloc_bioset(gfp_mask, nr_iovecs,
- mddev->bio_set);
+ b = bio_alloc_bioset(gfp_mask, nr_iovecs, mddev->bio_set);
if (!b)
return NULL;
- mddevp = (void*)b;
- mddevp[-1] = mddev;
- b->bi_destructor = mddev_bio_destructor;
return b;
}
EXPORT_SYMBOL_GPL(bio_alloc_mddev);
@@ -188,32 +173,10 @@ EXPORT_SYMBOL_GPL(bio_alloc_mddev);
struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
struct mddev *mddev)
{
- struct bio *b;
- struct mddev **mddevp;
-
if (!mddev || !mddev->bio_set)
return bio_clone(bio, gfp_mask);
- b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs,
- mddev->bio_set);
- if (!b)
- return NULL;
- mddevp = (void*)b;
- mddevp[-1] = mddev;
- b->bi_destructor = mddev_bio_destructor;
- __bio_clone(b, bio);
- if (bio_integrity(bio)) {
- int ret;
-
- ret = bio_integrity_clone(b, bio, gfp_mask, mddev->bio_set);
-
- if (ret < 0) {
- bio_put(b);
- return NULL;
- }
- }
-
- return b;
+ return bio_clone_bioset(bio, gfp_mask, mddev->bio_set);
}
EXPORT_SYMBOL_GPL(bio_clone_mddev);
@@ -5006,8 +4969,7 @@ int md_run(struct mddev *mddev)
}
if (mddev->bio_set == NULL)
- mddev->bio_set = bioset_create(BIO_POOL_SIZE,
- sizeof(struct mddev *));
+ mddev->bio_set = bioset_create(BIO_POOL_SIZE, 0);
spin_lock(&pers_lock);
pers = find_pers(mddev->level, mddev->clevel);
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index de63a1fc3737..a9e4fa95dfaa 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -422,6 +422,7 @@ static int raid0_run(struct mddev *mddev)
if (md_check_no_bitmap(mddev))
return -EINVAL;
blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
+ blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
/* if private is not null, we are here after takeover */
if (mddev->private == NULL) {
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index f1c84decb192..172a768036d8 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1411,7 +1411,8 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
/* complete ongoing async transfer before issuing discard */
if (card->host->areq)
mmc_blk_issue_rw_rq(mq, NULL);
- if (req->cmd_flags & REQ_SECURE)
+ if (req->cmd_flags & REQ_SECURE &&
+ !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
ret = mmc_blk_issue_secdiscard_rq(mq, req);
else
ret = mmc_blk_issue_discard_rq(mq, req);
@@ -1716,6 +1717,7 @@ force_ro_fail:
#define CID_MANFID_SANDISK 0x2
#define CID_MANFID_TOSHIBA 0x11
#define CID_MANFID_MICRON 0x13
+#define CID_MANFID_SAMSUNG 0x15
static const struct mmc_fixup blk_fixups[] =
{
@@ -1752,6 +1754,28 @@ static const struct mmc_fixup blk_fixups[] =
MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
MMC_QUIRK_LONG_READ_TIME),
+ /*
+ * On these Samsung MoviNAND parts, performing secure erase or
+ * secure trim can result in unrecoverable corruption due to a
+ * firmware bug.
+ */
+ MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+
END_FIXUP
};
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 322412cec4ee..a53c7c478e05 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -81,6 +81,7 @@ struct atmel_mci_caps {
bool has_bad_data_ordering;
bool need_reset_after_xfer;
bool need_blksz_mul_4;
+ bool need_notbusy_for_read_ops;
};
struct atmel_mci_dma {
@@ -1625,7 +1626,8 @@ static void atmci_tasklet_func(unsigned long priv)
__func__);
atmci_set_completed(host, EVENT_XFER_COMPLETE);
- if (host->data->flags & MMC_DATA_WRITE) {
+ if (host->caps.need_notbusy_for_read_ops ||
+ (host->data->flags & MMC_DATA_WRITE)) {
atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
state = STATE_WAITING_NOTBUSY;
} else if (host->mrq->stop) {
@@ -2218,6 +2220,7 @@ static void __init atmci_get_cap(struct atmel_mci *host)
host->caps.has_bad_data_ordering = 1;
host->caps.need_reset_after_xfer = 1;
host->caps.need_blksz_mul_4 = 1;
+ host->caps.need_notbusy_for_read_ops = 0;
/* keep only major version number */
switch (version & 0xf00) {
@@ -2238,6 +2241,7 @@ static void __init atmci_get_cap(struct atmel_mci *host)
case 0x200:
host->caps.has_rwproof = 1;
host->caps.need_blksz_mul_4 = 0;
+ host->caps.need_notbusy_for_read_ops = 1;
case 0x100:
host->caps.has_bad_data_ordering = 0;
host->caps.need_reset_after_xfer = 0;
diff --git a/drivers/mmc/host/bfin_sdh.c b/drivers/mmc/host/bfin_sdh.c
index 03666174ca48..a17dd7363ceb 100644
--- a/drivers/mmc/host/bfin_sdh.c
+++ b/drivers/mmc/host/bfin_sdh.c
@@ -49,13 +49,6 @@
#define bfin_write_SDH_CFG bfin_write_RSI_CFG
#endif
-struct dma_desc_array {
- unsigned long start_addr;
- unsigned short cfg;
- unsigned short x_count;
- short x_modify;
-} __packed;
-
struct sdh_host {
struct mmc_host *mmc;
spinlock_t lock;
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 72dc3cde646d..af40d227bece 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -627,6 +627,7 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot)
{
struct dw_mci *host = slot->host;
u32 div;
+ u32 clk_en_a;
if (slot->clock != host->current_speed) {
div = host->bus_hz / slot->clock;
@@ -659,9 +660,11 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot)
mci_send_cmd(slot,
SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0);
- /* enable clock */
- mci_writel(host, CLKENA, ((SDMMC_CLKEN_ENABLE |
- SDMMC_CLKEN_LOW_PWR) << slot->id));
+ /* enable clock; only low power if no SDIO */
+ clk_en_a = SDMMC_CLKEN_ENABLE << slot->id;
+ if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id)))
+ clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id;
+ mci_writel(host, CLKENA, clk_en_a);
/* inform CIU */
mci_send_cmd(slot,
@@ -862,6 +865,30 @@ static int dw_mci_get_cd(struct mmc_host *mmc)
return present;
}
+/*
+ * Disable low power mode.
+ *
+ * Low power mode will stop the card clock when idle. According to the
+ * description of the CLKENA register we should disable low power mode
+ * for SDIO cards if we need SDIO interrupts to work.
+ *
+ * This function is fast if low power mode is already disabled.
+ */
+static void dw_mci_disable_low_power(struct dw_mci_slot *slot)
+{
+ struct dw_mci *host = slot->host;
+ u32 clk_en_a;
+ const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
+
+ clk_en_a = mci_readl(host, CLKENA);
+
+ if (clk_en_a & clken_low_pwr) {
+ mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr);
+ mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
+ SDMMC_CMD_PRV_DAT_WAIT, 0);
+ }
+}
+
static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
{
struct dw_mci_slot *slot = mmc_priv(mmc);
@@ -871,6 +898,14 @@ static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
/* Enable/disable Slot Specific SDIO interrupt */
int_mask = mci_readl(host, INTMASK);
if (enb) {
+ /*
+ * Turn off low power mode if it was enabled. This is a bit of
+ * a heavy operation and we disable / enable IRQs a lot, so
+ * we'll leave low power mode disabled and it will get
+ * re-enabled again in dw_mci_setup_bus().
+ */
+ dw_mci_disable_low_power(slot);
+
mci_writel(host, INTMASK,
(int_mask | SDMMC_INT_SDIO(slot->id)));
} else {
@@ -1429,22 +1464,10 @@ static void dw_mci_read_data_pio(struct dw_mci *host)
nbytes += len;
remain -= len;
} while (remain);
- sg_miter->consumed = offset;
+ sg_miter->consumed = offset;
status = mci_readl(host, MINTSTS);
mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
- if (status & DW_MCI_DATA_ERROR_FLAGS) {
- host->data_status = status;
- data->bytes_xfered += nbytes;
- sg_miter_stop(sg_miter);
- host->sg = NULL;
- smp_wmb();
-
- set_bit(EVENT_DATA_ERROR, &host->pending_events);
-
- tasklet_schedule(&host->tasklet);
- return;
- }
} while (status & SDMMC_INT_RXDR); /*if the RXDR is ready read again*/
data->bytes_xfered += nbytes;
@@ -1497,23 +1520,10 @@ static void dw_mci_write_data_pio(struct dw_mci *host)
nbytes += len;
remain -= len;
} while (remain);
- sg_miter->consumed = offset;
+ sg_miter->consumed = offset;
status = mci_readl(host, MINTSTS);
mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
- if (status & DW_MCI_DATA_ERROR_FLAGS) {
- host->data_status = status;
- data->bytes_xfered += nbytes;
- sg_miter_stop(sg_miter);
- host->sg = NULL;
-
- smp_wmb();
-
- set_bit(EVENT_DATA_ERROR, &host->pending_events);
-
- tasklet_schedule(&host->tasklet);
- return;
- }
} while (status & SDMMC_INT_TXDR); /* if TXDR write again */
data->bytes_xfered += nbytes;
@@ -1547,12 +1557,11 @@ static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status)
static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
{
struct dw_mci *host = dev_id;
- u32 status, pending;
+ u32 pending;
unsigned int pass_count = 0;
int i;
do {
- status = mci_readl(host, RINTSTS);
pending = mci_readl(host, MINTSTS); /* read-only mask reg */
/*
@@ -1570,7 +1579,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
if (pending & DW_MCI_CMD_ERROR_FLAGS) {
mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS);
- host->cmd_status = status;
+ host->cmd_status = pending;
smp_wmb();
set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
}
@@ -1578,18 +1587,16 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
if (pending & DW_MCI_DATA_ERROR_FLAGS) {
/* if there is an error report DATA_ERROR */
mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS);
- host->data_status = status;
+ host->data_status = pending;
smp_wmb();
set_bit(EVENT_DATA_ERROR, &host->pending_events);
- if (!(pending & (SDMMC_INT_DTO | SDMMC_INT_DCRC |
- SDMMC_INT_SBE | SDMMC_INT_EBE)))
- tasklet_schedule(&host->tasklet);
+ tasklet_schedule(&host->tasklet);
}
if (pending & SDMMC_INT_DATA_OVER) {
mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER);
if (!host->data_status)
- host->data_status = status;
+ host->data_status = pending;
smp_wmb();
if (host->dir_status == DW_MCI_RECV_STATUS) {
if (host->sg != NULL)
@@ -1613,7 +1620,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
if (pending & SDMMC_INT_CMD_DONE) {
mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE);
- dw_mci_cmd_interrupt(host, status);
+ dw_mci_cmd_interrupt(host, pending);
}
if (pending & SDMMC_INT_CD) {
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index a51f9309ffbb..ad3fcea1269e 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -285,11 +285,11 @@ static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id)
writel(stat & MXS_MMC_IRQ_BITS,
host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_CLR);
+ spin_unlock(&host->lock);
+
if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN))
mmc_signal_sdio_irq(host->mmc);
- spin_unlock(&host->lock);
-
if (stat & BM_SSP_CTRL1_RESP_TIMEOUT_IRQ)
cmd->error = -ETIMEDOUT;
else if (stat & BM_SSP_CTRL1_RESP_ERR_IRQ)
@@ -644,11 +644,6 @@ static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
host->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_SET);
-
- if (readl(host->base + HW_SSP_STATUS(host)) &
- BM_SSP_STATUS_SDIO_IRQ)
- mmc_signal_sdio_irq(host->mmc);
-
} else {
writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
host->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
@@ -657,6 +652,11 @@ static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
}
spin_unlock_irqrestore(&host->lock, flags);
+
+ if (enable && readl(host->base + HW_SSP_STATUS(host)) &
+ BM_SSP_STATUS_SDIO_IRQ)
+ mmc_signal_sdio_irq(host->mmc);
+
}
static const struct mmc_host_ops mxs_mmc_ops = {
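[Editor's note] Both mxs-mmc hunks above move mmc_signal_sdio_irq() outside host->lock, presumably because the SDIO-interrupt path can re-enter the host's own enable_sdio_irq handler, which takes the same lock. The general shape, as a sketch: sample the status under the lock, drop the lock, then call out:

	spin_lock_irqsave(&host->lock, flags);
	/* ... program registers, update driver state ... */
	stat = readl(host->base + HW_SSP_STATUS(host));
	spin_unlock_irqrestore(&host->lock, flags);

	if (stat & BM_SSP_STATUS_SDIO_IRQ)
		mmc_signal_sdio_irq(host->mmc);	/* called with no locks held */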
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 50e08f03aa65..a5999a74496a 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -668,7 +668,7 @@ mmc_omap_clk_timer(unsigned long data)
static void
mmc_omap_xfer_data(struct mmc_omap_host *host, int write)
{
- int n;
+ int n, nwords;
if (host->buffer_bytes_left == 0) {
host->sg_idx++;
@@ -678,15 +678,23 @@ mmc_omap_xfer_data(struct mmc_omap_host *host, int write)
n = 64;
if (n > host->buffer_bytes_left)
n = host->buffer_bytes_left;
+
+ nwords = n / 2;
+ nwords += n & 1; /* handle odd number of bytes to transfer */
+
host->buffer_bytes_left -= n;
host->total_bytes_left -= n;
host->data->bytes_xfered += n;
if (write) {
- __raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n);
+ __raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA),
+ host->buffer, nwords);
} else {
- __raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n);
+ __raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA),
+ host->buffer, nwords);
}
+
+ host->buffer += nwords;
}
static inline void mmc_omap_report_irq(u16 status)
diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
index b97b2f5dafdb..d25f9ab9a54d 100644
--- a/drivers/mmc/host/sdhci-esdhc.h
+++ b/drivers/mmc/host/sdhci-esdhc.h
@@ -48,14 +48,14 @@ static inline void esdhc_set_clock(struct sdhci_host *host, unsigned int clock)
int div = 1;
u32 temp;
+ if (clock == 0)
+ goto out;
+
temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL);
temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN
| ESDHC_CLOCK_MASK);
sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
- if (clock == 0)
- goto out;
-
while (host->max_clk / pre_div / 16 > clock && pre_div < 256)
pre_div *= 2;
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index 437bc193e170..568307cc7caf 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -340,7 +340,7 @@ retry:
* of this LEB as it will be deleted and freed in 'ubi_add_to_av()'.
*/
err = ubi_add_to_av(ubi, ai, new_aeb->pnum, new_aeb->ec, vid_hdr, 0);
- kfree(new_aeb);
+ kmem_cache_free(ai->aeb_slab_cache, new_aeb);
ubi_free_vid_hdr(ubi, vid_hdr);
return err;
@@ -353,7 +353,7 @@ write_error:
list_add(&new_aeb->u.list, &ai->erase);
goto retry;
}
- kfree(new_aeb);
+ kmem_cache_free(ai->aeb_slab_cache, new_aeb);
out_free:
ubi_free_vid_hdr(ubi, vid_hdr);
return err;
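The rule behind this two-line fix: memory obtained from a kmem_cache must be released back to that same cache; handing it to kfree() corrupts the slab bookkeeping. In isolation (type per the attach-info code; sketch only):

        struct ubi_ainf_peb *aeb;

        aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
        if (!aeb)
                return -ENOMEM;
        /* ... use aeb ... */
        kmem_cache_free(ai->aeb_slab_cache, aeb);       /* not kfree(aeb) */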
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index 4f50145f6483..662c5f7eb0c5 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -109,7 +109,9 @@ static int sp_probe(struct platform_device *pdev)
priv = netdev_priv(dev);
dev->irq = res_irq->start;
- priv->irq_flags = res_irq->flags & (IRQF_TRIGGER_MASK | IRQF_SHARED);
+ priv->irq_flags = res_irq->flags & IRQF_TRIGGER_MASK;
+ if (res_irq->flags & IORESOURCE_IRQ_SHAREABLE)
+ priv->irq_flags |= IRQF_SHARED;
priv->reg_base = addr;
/* The CAN clock frequency is half the oscillator clock frequency */
priv->can.clock.freq = pdata->osc_freq / 2;
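The point here is that resource flags and request_irq() flags are different bit namespaces: the IORESOURCE_IRQ_* trigger bits happen to coincide with IRQF_TRIGGER_*, but IORESOURCE_IRQ_SHAREABLE is a different bit from IRQF_SHARED, so masking the resource flags with IRQF_SHARED could never pick it up. Generic form of the translation (sketch):

        unsigned long irqf = res_irq->flags & IRQF_TRIGGER_MASK; /* trigger bits line up */

        if (res_irq->flags & IORESOURCE_IRQ_SHAREABLE)
                irqf |= IRQF_SHARED;    /* different bit value: must be translated */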
diff --git a/drivers/net/can/softing/softing_fw.c b/drivers/net/can/softing/softing_fw.c
index 310596175676..b595d3422b9f 100644
--- a/drivers/net/can/softing/softing_fw.c
+++ b/drivers/net/can/softing/softing_fw.c
@@ -150,7 +150,7 @@ int softing_load_fw(const char *file, struct softing *card,
const uint8_t *mem, *end, *dat;
uint16_t type, len;
uint32_t addr;
- uint8_t *buf = NULL;
+ uint8_t *buf = NULL, *new_buf;
int buflen = 0;
int8_t type_end = 0;
@@ -199,11 +199,12 @@ int softing_load_fw(const char *file, struct softing *card,
if (len > buflen) {
/* align buflen */
buflen = (len + (1024-1)) & ~(1024-1);
- buf = krealloc(buf, buflen, GFP_KERNEL);
- if (!buf) {
+ new_buf = krealloc(buf, buflen, GFP_KERNEL);
+ if (!new_buf) {
ret = -ENOMEM;
goto failed;
}
+ buf = new_buf;
}
/* verify record data */
memcpy_fromio(buf, &dpram[addr + offset], len);
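This is the standard krealloc() anti-leak idiom: on failure krealloc() returns NULL but leaves the old buffer allocated, so assigning the result straight back to buf would orphan it. Reduced to its essence (names illustrative):

        u8 *new_buf;

        new_buf = krealloc(buf, buflen, GFP_KERNEL);
        if (!new_buf)
                return -ENOMEM; /* buf is still valid; the error path can free it */
        buf = new_buf;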
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 463b9ec57d80..6d1a24acb77e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1708,9 +1708,6 @@ struct bnx2x_func_init_params {
continue; \
else
-#define for_each_napi_rx_queue(bp, var) \
- for ((var) = 0; (var) < bp->num_napi_queues; (var)++)
-
/* Skip OOO FP */
#define for_each_tx_queue(bp, var) \
for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index e879e19eb0d6..af20c6ee2cd9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -2046,6 +2046,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
*/
bnx2x_setup_tc(bp->dev, bp->max_cos);
+ /* Add all NAPI objects */
+ bnx2x_add_all_napi(bp);
bnx2x_napi_enable(bp);
/* set pf load just before approaching the MCP */
@@ -2408,6 +2410,8 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
/* Disable HW interrupts, NAPI */
bnx2x_netif_stop(bp, 1);
+ /* Delete all NAPI objects */
+ bnx2x_del_all_napi(bp);
/* Release IRQs */
bnx2x_free_irq(bp);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index dfa757e74296..21b553229ea4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -792,7 +792,7 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp)
bp->num_napi_queues = bp->num_queues;
/* Add NAPI objects */
- for_each_napi_rx_queue(bp, i)
+ for_each_rx_queue(bp, i)
netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
bnx2x_poll, BNX2X_NAPI_WEIGHT);
}
@@ -801,7 +801,7 @@ static inline void bnx2x_del_all_napi(struct bnx2x *bp)
{
int i;
- for_each_napi_rx_queue(bp, i)
+ for_each_rx_queue(bp, i)
netif_napi_del(&bnx2x_fp(bp, i, napi));
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index fc4e0e3885b0..c37a68d68090 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -2888,11 +2888,9 @@ static void bnx2x_get_channels(struct net_device *dev,
*/
static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss)
{
- bnx2x_del_all_napi(bp);
bnx2x_disable_msi(bp);
BNX2X_NUM_QUEUES(bp) = num_rss + NON_ETH_CONTEXT_USE;
bnx2x_set_int_mode(bp);
- bnx2x_add_all_napi(bp);
}
/**
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 02b5a343b195..21054987257a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -8427,6 +8427,8 @@ unload_error:
/* Disable HW interrupts, NAPI */
bnx2x_netif_stop(bp, 1);
+ /* Delete all NAPI objects */
+ bnx2x_del_all_napi(bp);
/* Release IRQs */
bnx2x_free_irq(bp);
@@ -11229,10 +11231,12 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
static void poll_bnx2x(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
+ int i;
- disable_irq(bp->pdev->irq);
- bnx2x_interrupt(bp->pdev->irq, dev);
- enable_irq(bp->pdev->irq);
+ for_each_eth_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+ napi_schedule(&bnx2x_fp(bp, fp->index, napi));
+ }
}
#endif
@@ -11899,9 +11903,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
*/
bnx2x_set_int_mode(bp);
- /* Add all NAPI objects */
- bnx2x_add_all_napi(bp);
-
rc = register_netdev(dev);
if (rc) {
dev_err(&pdev->dev, "Cannot register net device\n");
@@ -11976,9 +11977,6 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
unregister_netdev(dev);
- /* Delete all NAPI objects */
- bnx2x_del_all_napi(bp);
-
/* Power on: we can't let PCI layer write to us while we are in D3 */
bnx2x_set_power_state(bp, PCI_D0);
@@ -12025,6 +12023,8 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
bnx2x_tx_disable(bp);
bnx2x_netif_stop(bp, 0);
+ /* Delete all NAPI objects */
+ bnx2x_del_all_napi(bp);
del_timer_sync(&bp->timer);
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index 845b2020f291..138446957786 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -1243,6 +1243,7 @@ static void set_multicast_list(struct net_device *dev)
{
struct net_local *lp = netdev_priv(dev);
unsigned long flags;
+ u16 cfg;
spin_lock_irqsave(&lp->lock, flags);
if (dev->flags & IFF_PROMISC)
@@ -1260,11 +1261,10 @@ static void set_multicast_list(struct net_device *dev)
/* in promiscuous mode, we accept errored packets,
* so we have to enable interrupts on them also
*/
- writereg(dev, PP_RxCFG,
- (lp->curr_rx_cfg |
- (lp->rx_mode == RX_ALL_ACCEPT)
- ? (RX_CRC_ERROR_ENBL | RX_RUNT_ENBL | RX_EXTRA_DATA_ENBL)
- : 0));
+ cfg = lp->curr_rx_cfg;
+ if (lp->rx_mode == RX_ALL_ACCEPT)
+ cfg |= RX_CRC_ERROR_ENBL | RX_RUNT_ENBL | RX_EXTRA_DATA_ENBL;
+ writereg(dev, PP_RxCFG, cfg);
spin_unlock_irqrestore(&lp->lock, flags);
}
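The bug removed above is an operator-precedence trap: | binds tighter than ?:, so the old expression parsed as (cfg | condition) ? flags : 0 and discarded cfg entirely whenever either operand was non-zero. A minimal demonstration with made-up values:

        u16 cfg = 0x0100;
        bool accept_all = true;

        u16 bad  = cfg | accept_all ? 0x0e00 : 0;       /* (cfg | accept_all) ? ... -> 0x0e00 */
        u16 good = cfg | (accept_all ? 0x0e00 : 0);     /* 0x0f00, the intended result */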
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 7fac97b4bb59..8c63d06ab12b 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -259,7 +259,7 @@ int be_process_mcc(struct be_adapter *adapter)
int num = 0, status = 0;
struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
- spin_lock_bh(&adapter->mcc_cq_lock);
+ spin_lock(&adapter->mcc_cq_lock);
while ((compl = be_mcc_compl_get(adapter))) {
if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
/* Interpret flags as an async trailer */
@@ -280,7 +280,7 @@ int be_process_mcc(struct be_adapter *adapter)
if (num)
be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);
- spin_unlock_bh(&adapter->mcc_cq_lock);
+ spin_unlock(&adapter->mcc_cq_lock);
return status;
}
@@ -295,7 +295,9 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
if (be_error(adapter))
return -EIO;
+ local_bh_disable();
status = be_process_mcc(adapter);
+ local_bh_enable();
if (atomic_read(&mcc_obj->q.used) == 0)
break;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 90a903d83d87..78b8aa8069f0 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -3763,7 +3763,9 @@ static void be_worker(struct work_struct *work)
/* when interrupts are not yet enabled, just reap any pending
* mcc completions */
if (!netif_running(adapter->netdev)) {
+ local_bh_disable();
be_process_mcc(adapter);
+ local_bh_enable();
goto reschedule;
}
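The three benet hunks are one change: be_process_mcc() also runs in contexts where bottom halves are already disabled, so the _bh lock variant was moved out of the function and the two process-context callers now bracket the call themselves. The call-site shape (sketch):

        local_bh_disable();     /* exclude the BH path that also takes mcc_cq_lock */
        status = be_process_mcc(adapter);
        local_bh_enable();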
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 4605f7246687..d3233f59a82e 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1041,7 +1041,7 @@ static int gfar_probe(struct platform_device *ofdev)
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
- dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+ dev->features |= NETIF_F_HW_VLAN_RX;
}
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index cd153326c3cf..cb3356c9af80 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -310,6 +310,7 @@ struct e1000_adapter {
*/
struct e1000_ring *tx_ring /* One per active queue */
____cacheline_aligned_in_smp;
+ u32 tx_fifo_limit;
struct napi_struct napi;
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 46c3b1f9ff89..d01a099475a1 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -3517,6 +3517,15 @@ void e1000e_reset(struct e1000_adapter *adapter)
}
/*
+ * Alignment of Tx data is on an arbitrary byte boundary with the
+ * maximum size per Tx descriptor limited only to the transmit
+ * allocation of the packet buffer minus 96 bytes with an upper
+ * limit of 24KB due to receive synchronization limitations.
+ */
+ adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96,
+ 24 << 10);
+
+ /*
* Disable Adaptive Interrupt Moderation if 2 full packets cannot
* fit in receive buffer.
*/
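A worked example of the new limit, with an illustrative PBA value (not from the source): if the upper 16 bits of PBA read 20, the Tx packet buffer is 20 KB, so

        u32 tx_kb = er32(PBA) >> 16;                    /* say this reads 20 */
        u32 limit = min_t(u32, (tx_kb << 10) - 96,      /* 20 * 1024 - 96 = 20384 */
                          24 << 10);                    /* cap 24576 -> limit = 20384 */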
@@ -4785,12 +4794,9 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
return 1;
}
-#define E1000_MAX_PER_TXD 8192
-#define E1000_MAX_TXD_PWR 12
-
static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
unsigned int first, unsigned int max_per_txd,
- unsigned int nr_frags, unsigned int mss)
+ unsigned int nr_frags)
{
struct e1000_adapter *adapter = tx_ring->adapter;
struct pci_dev *pdev = adapter->pdev;
@@ -5023,20 +5029,19 @@ static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
{
+ BUG_ON(size > tx_ring->count);
+
if (e1000_desc_unused(tx_ring) >= size)
return 0;
return __e1000_maybe_stop_tx(tx_ring, size);
}
-#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_ring *tx_ring = adapter->tx_ring;
unsigned int first;
- unsigned int max_per_txd = E1000_MAX_PER_TXD;
- unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
unsigned int tx_flags = 0;
unsigned int len = skb_headlen(skb);
unsigned int nr_frags;
@@ -5056,18 +5061,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
}
mss = skb_shinfo(skb)->gso_size;
- /*
- * The controller does a simple calculation to
- * make sure there is enough room in the FIFO before
- * initiating the DMA for each buffer. The calc is:
- * 4 = ceil(buffer len/mss). To make sure we don't
- * overrun the FIFO, adjust the max buffer len if mss
- * drops.
- */
if (mss) {
u8 hdr_len;
- max_per_txd = min(mss << 2, max_per_txd);
- max_txd_pwr = fls(max_per_txd) - 1;
/*
* TSO Workaround for 82571/2/3 Controllers -- if skb->data
@@ -5097,12 +5092,12 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
count++;
count++;
- count += TXD_USE_COUNT(len, max_txd_pwr);
+ count += DIV_ROUND_UP(len, adapter->tx_fifo_limit);
nr_frags = skb_shinfo(skb)->nr_frags;
for (f = 0; f < nr_frags; f++)
- count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
- max_txd_pwr);
+ count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]),
+ adapter->tx_fifo_limit);
if (adapter->hw.mac.tx_pkt_filtering)
e1000_transfer_dhcp_info(adapter, skb);
@@ -5144,15 +5139,18 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
tx_flags |= E1000_TX_FLAGS_NO_FCS;
/* if count is 0 then mapping error has occurred */
- count = e1000_tx_map(tx_ring, skb, first, max_per_txd, nr_frags, mss);
+ count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit,
+ nr_frags);
if (count) {
skb_tx_timestamp(skb);
netdev_sent_queue(netdev, skb->len);
e1000_tx_queue(tx_ring, tx_flags, count);
/* Make sure there is space in the ring for the next send. */
- e1000_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 2);
-
+ e1000_maybe_stop_tx(tx_ring,
+ (MAX_SKB_FRAGS *
+ DIV_ROUND_UP(PAGE_SIZE,
+ adapter->tx_fifo_limit) + 2));
} else {
dev_kfree_skb_any(skb);
tx_ring->buffer_info[first].time_stamp = 0;
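The new stop-queue reservation follows from the same limit. Continuing the illustrative numbers (tx_fifo_limit = 20384, PAGE_SIZE = 4096, MAX_SKB_FRAGS = 17):

        /*
         * per-frag descriptors = DIV_ROUND_UP(4096, 20384) = 1
         * reservation          = 17 * 1 + 2 = 19 descriptors
         *
         * The old MAX_SKB_FRAGS + 2 constant matches this case, but would
         * under-reserve if tx_fifo_limit ever dropped below PAGE_SIZE.
         */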
@@ -6327,8 +6325,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
adapter->hw.phy.autoneg_advertised = 0x2f;
/* ring size defaults */
- adapter->rx_ring->count = 256;
- adapter->tx_ring->count = 256;
+ adapter->rx_ring->count = E1000_DEFAULT_RXD;
+ adapter->tx_ring->count = E1000_DEFAULT_TXD;
/*
* Initial Wake on LAN setting - If APM wake is enabled in
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 8cba2df82b18..5faedd855b77 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -863,8 +863,8 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
&ip_entry->ip4dst, &ip_entry->pdst);
if (rc != 0) {
rc = efx_filter_get_ipv4_full(
- &spec, &proto, &ip_entry->ip4src, &ip_entry->psrc,
- &ip_entry->ip4dst, &ip_entry->pdst);
+ &spec, &proto, &ip_entry->ip4dst, &ip_entry->pdst,
+ &ip_entry->ip4src, &ip_entry->psrc);
EFX_WARN_ON_PARANOID(rc);
ip_mask->ip4src = ~0;
ip_mask->psrc = ~0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index e2d083228f3a..719be3912aa9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -22,6 +22,9 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
+#ifndef __COMMON_H__
+#define __COMMON_H__
+
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
@@ -366,3 +369,5 @@ extern void stmmac_set_mac(void __iomem *ioaddr, bool enable);
extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
extern const struct stmmac_ring_mode_ops ring_mode_ops;
+
+#endif /* __COMMON_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs.h b/drivers/net/ethernet/stmicro/stmmac/descs.h
index 9820ec842cc0..223adf95fd03 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs.h
@@ -20,6 +20,10 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
+
+#ifndef __DESCS_H__
+#define __DESCS_H__
+
struct dma_desc {
/* Receive descriptor */
union {
@@ -166,3 +170,5 @@ enum tdes_csum_insertion {
* is not calculated */
cic_full = 3, /* IP header and pseudoheader */
};
+
+#endif /* __DESCS_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
index dd8d6e19dff6..7ee9499a6e38 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
@@ -27,6 +27,9 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
+#ifndef __DESC_COM_H__
+#define __DESC_COM_H__
+
#if defined(CONFIG_STMMAC_RING)
static inline void ehn_desc_rx_set_on_ring_chain(struct dma_desc *p, int end)
{
@@ -124,3 +127,5 @@ static inline void norm_set_tx_desc_len(struct dma_desc *p, int len)
p->des01.tx.buffer1_size = len;
}
#endif
+
+#endif /* __DESC_COM_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100.h b/drivers/net/ethernet/stmicro/stmmac/dwmac100.h
index 7c6d857a9cc7..2ec6aeae349e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100.h
@@ -22,6 +22,9 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
+#ifndef __DWMAC100_H__
+#define __DWMAC100_H__
+
#include <linux/phy.h>
#include "common.h"
@@ -119,3 +122,5 @@ enum ttc_control {
#define DMA_MISSED_FRAME_M_CNTR 0x0000ffff /* Missed Frame Counter */
extern const struct stmmac_dma_ops dwmac100_dma_ops;
+
+#endif /* __DWMAC100_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index f90fcb5f9573..0e4cacedc1f0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -19,6 +19,8 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
+#ifndef __DWMAC1000_H__
+#define __DWMAC1000_H__
#include <linux/phy.h>
#include "common.h"
@@ -229,6 +231,7 @@ enum rtc_control {
#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208
/* Synopsys Core versions */
-#define DWMAC_CORE_3_40 34
+#define DWMAC_CORE_3_40 0x34
extern const struct stmmac_dma_ops dwmac1000_dma_ops;
+#endif /* __DWMAC1000_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index e678ce39d014..e49c9a0fd6ff 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -22,6 +22,9 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
+#ifndef __DWMAC_DMA_H__
+#define __DWMAC_DMA_H__
+
/* DMA CRS Control and Status Register Mapping */
#define DMA_BUS_MODE 0x00001000 /* Bus Mode */
#define DMA_XMT_POLL_DEMAND 0x00001004 /* Transmit Poll Demand */
@@ -109,3 +112,5 @@ extern void dwmac_dma_start_rx(void __iomem *ioaddr);
extern void dwmac_dma_stop_rx(void __iomem *ioaddr);
extern int dwmac_dma_interrupt(void __iomem *ioaddr,
struct stmmac_extra_stats *x);
+
+#endif /* __DWMAC_DMA_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h
index a38352024cb8..67995ef25251 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc.h
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h
@@ -22,6 +22,9 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
+#ifndef __MMC_H__
+#define __MMC_H__
+
/* MMC control register */
/* When set, all counters are reset */
#define MMC_CNTRL_COUNTER_RESET 0x1
@@ -129,3 +132,5 @@ struct stmmac_counters {
extern void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode);
extern void dwmac_mmc_intr_all_mask(void __iomem *ioaddr);
extern void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc);
+
+#endif /* __MMC_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index c07cfe989f6e..0c74a702d461 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -33,7 +33,7 @@
#define MMC_TX_INTR 0x00000108 /* MMC TX Interrupt */
#define MMC_RX_INTR_MASK 0x0000010c /* MMC Interrupt Mask */
#define MMC_TX_INTR_MASK 0x00000110 /* MMC Interrupt Mask */
-#define MMC_DEFAUL_MASK 0xffffffff
+#define MMC_DEFAULT_MASK 0xffffffff
/* MMC TX counter registers */
@@ -147,8 +147,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
/* To mask all interrupts. */
void dwmac_mmc_intr_all_mask(void __iomem *ioaddr)
{
- writel(MMC_DEFAUL_MASK, ioaddr + MMC_RX_INTR_MASK);
- writel(MMC_DEFAUL_MASK, ioaddr + MMC_TX_INTR_MASK);
+ writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_INTR_MASK);
+ writel(MMC_DEFAULT_MASK, ioaddr + MMC_TX_INTR_MASK);
}
/* This reads the MAC core counters (if actually supported).
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index f2d3665430ad..e872e1da3137 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -20,6 +20,9 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
+#ifndef __STMMAC_H__
+#define __STMMAC_H__
+
#define STMMAC_RESOURCE_NAME "stmmaceth"
#define DRV_MODULE_VERSION "March_2012"
@@ -166,3 +169,5 @@ static inline void stmmac_unregister_pci(void)
{
}
#endif /* CONFIG_STMMAC_PCI */
+
+#endif /* __STMMAC_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h
index 6863590d184b..aea9b14cdfbe 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h
@@ -21,6 +21,8 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
+#ifndef __STMMAC_TIMER_H__
+#define __STMMAC_TIMER_H__
struct stmmac_timer {
void (*timer_start) (unsigned int new_freq);
@@ -40,3 +42,5 @@ void stmmac_schedule(struct net_device *dev);
extern int tmu2_register_user(void *fnt, void *data);
extern void tmu2_unregister_user(void);
#endif
+
+#endif /* __STMMAC_TIMER_H__ */
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index cd7ee204e94a..a9ca4a03d31b 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -394,8 +394,10 @@ static int __devexit davinci_mdio_remove(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct davinci_mdio_data *data = dev_get_drvdata(dev);
- if (data->bus)
+ if (data->bus) {
+ mdiobus_unregister(data->bus);
mdiobus_free(data->bus);
+ }
if (data->clk)
clk_put(data->clk);
diff --git a/drivers/net/fddi/skfp/pmf.c b/drivers/net/fddi/skfp/pmf.c
index 24d8566cfd8b..441b4dc79450 100644
--- a/drivers/net/fddi/skfp/pmf.c
+++ b/drivers/net/fddi/skfp/pmf.c
@@ -673,7 +673,7 @@ void smt_add_para(struct s_smc *smc, struct s_pcon *pcon, u_short para,
sm_pm_get_ls(smc,port_to_mib(smc,port))) ;
break ;
case SMT_P_REASON :
- * (u_long *) to = 0 ;
+ *(u32 *)to = 0 ;
sp_len = 4 ;
goto sp_done ;
case SMT_P1033 : /* time stamp */
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 328397c66730..adfab3fc5478 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -413,7 +413,9 @@ static const struct usb_device_id products[] = {
/* 5. Gobi 2000 and 3000 devices */
{QMI_GOBI_DEVICE(0x413c, 0x8186)}, /* Dell Gobi 2000 Modem device (N0218, VU936) */
+ {QMI_GOBI_DEVICE(0x413c, 0x8194)}, /* Dell Gobi 3000 Composite */
{QMI_GOBI_DEVICE(0x05c6, 0x920b)}, /* Generic Gobi 2000 Modem device */
+ {QMI_GOBI_DEVICE(0x05c6, 0x920d)}, /* Gobi 3000 Composite */
{QMI_GOBI_DEVICE(0x05c6, 0x9225)}, /* Sony Gobi 2000 Modem device (N0279, VU730) */
{QMI_GOBI_DEVICE(0x05c6, 0x9245)}, /* Samsung Gobi 2000 Modem device (VL176) */
{QMI_GOBI_DEVICE(0x03f0, 0x251d)}, /* HP Gobi 2000 Modem device (VP412) */
@@ -441,6 +443,8 @@ static const struct usb_device_id products[] = {
{QMI_GOBI_DEVICE(0x1199, 0x9015)}, /* Sierra Wireless Gobi 3000 Modem device */
{QMI_GOBI_DEVICE(0x1199, 0x9019)}, /* Sierra Wireless Gobi 3000 Modem device */
{QMI_GOBI_DEVICE(0x1199, 0x901b)}, /* Sierra Wireless MC7770 */
+ {QMI_GOBI_DEVICE(0x12d1, 0x14f1)}, /* Sony Gobi 3000 Composite */
+ {QMI_GOBI_DEVICE(0x1410, 0xa021)}, /* Foxconn Gobi 3000 Modem device (Novatel E396) */
{ } /* END */
};
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 8531c1caac28..fd4b26d46fd5 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1573,7 +1573,7 @@ int usbnet_resume (struct usb_interface *intf)
netif_device_present(dev->net) &&
!timer_pending(&dev->delay) &&
!test_bit(EVENT_RX_HALT, &dev->flags))
- rx_alloc_submit(dev, GFP_KERNEL);
+ rx_alloc_submit(dev, GFP_NOIO);
if (!(dev->txq.qlen >= TX_QLEN(dev)))
netif_tx_wake_all_queues(dev->net);
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index 4026c906cc7b..b7e0258887e7 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -1482,7 +1482,7 @@ ath5k_eeprom_read_target_rate_pwr_info(struct ath5k_hw *ah, unsigned int mode)
case AR5K_EEPROM_MODE_11A:
offset += AR5K_EEPROM_TARGET_PWR_OFF_11A(ee->ee_version);
rate_pcal_info = ee->ee_rate_tpwr_a;
- ee->ee_rate_target_pwr_num[mode] = AR5K_EEPROM_N_5GHZ_CHAN;
+ ee->ee_rate_target_pwr_num[mode] = AR5K_EEPROM_N_5GHZ_RATE_CHAN;
break;
case AR5K_EEPROM_MODE_11B:
offset += AR5K_EEPROM_TARGET_PWR_OFF_11B(ee->ee_version);
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.h b/drivers/net/wireless/ath/ath5k/eeprom.h
index dc2bcfeadeb4..94a9bbea6874 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.h
+++ b/drivers/net/wireless/ath/ath5k/eeprom.h
@@ -182,6 +182,7 @@
#define AR5K_EEPROM_EEP_DELTA 10
#define AR5K_EEPROM_N_MODES 3
#define AR5K_EEPROM_N_5GHZ_CHAN 10
+#define AR5K_EEPROM_N_5GHZ_RATE_CHAN 8
#define AR5K_EEPROM_N_2GHZ_CHAN 3
#define AR5K_EEPROM_N_2GHZ_CHAN_2413 4
#define AR5K_EEPROM_N_2GHZ_CHAN_MAX 4
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index 192ad5c1fcc8..a5edebeb0b4f 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -1233,6 +1233,9 @@ uint brcms_reset(struct brcms_info *wl)
/* dpc will not be rescheduled */
wl->resched = false;
+ /* inform publicly that interface is down */
+ wl->pub->up = false;
+
return 0;
}
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 95aa8e1683ec..83324b321652 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -2042,7 +2042,8 @@ static void isr_indicate_associated(struct ipw2100_priv *priv, u32 status)
return;
}
len = ETH_ALEN;
- ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID, &bssid, &len);
+ ret = ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID, bssid,
+ &len);
if (ret) {
IPW_DEBUG_INFO("failed querying ordinals at line %d\n",
__LINE__);
diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
index 46782f1102ac..a47b306b522c 100644
--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
@@ -124,6 +124,9 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
const struct fw_img *img;
size_t bufsz;
+ if (!iwl_is_ready_rf(priv))
+ return -EAGAIN;
+
/* default is to dump the entire data segment */
if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
priv->dbgfs_sram_offset = 0x800000;
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index d9694c58208c..4ffc18dc3a57 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -350,7 +350,7 @@ int iwl_queue_space(const struct iwl_queue *q);
/*****************************************************
* Error handling
******************************************************/
-int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display);
+int iwl_dump_fh(struct iwl_trans *trans, char **buf);
void iwl_dump_csr(struct iwl_trans *trans);
/*****************************************************
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index 39a6ca1f009c..d1a61ba6247a 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -555,7 +555,7 @@ static void iwl_irq_handle_error(struct iwl_trans *trans)
}
iwl_dump_csr(trans);
- iwl_dump_fh(trans, NULL, false);
+ iwl_dump_fh(trans, NULL);
iwl_op_mode_nic_error(trans->op_mode);
}
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 939c2f78df58..1e86ea2266d4 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -1649,13 +1649,9 @@ static const char *get_fh_string(int cmd)
#undef IWL_CMD
}
-int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
+int iwl_dump_fh(struct iwl_trans *trans, char **buf)
{
int i;
-#ifdef CONFIG_IWLWIFI_DEBUG
- int pos = 0;
- size_t bufsz = 0;
-#endif
static const u32 fh_tbl[] = {
FH_RSCSR_CHNL0_STTS_WPTR_REG,
FH_RSCSR_CHNL0_RBDCB_BASE_REG,
@@ -1667,29 +1663,35 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display)
FH_TSSR_TX_STATUS_REG,
FH_TSSR_TX_ERROR_REG
};
-#ifdef CONFIG_IWLWIFI_DEBUG
- if (display) {
- bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (buf) {
+ int pos = 0;
+ size_t bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
+
*buf = kmalloc(bufsz, GFP_KERNEL);
if (!*buf)
return -ENOMEM;
+
pos += scnprintf(*buf + pos, bufsz - pos,
"FH register values:\n");
- for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
+
+ for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
pos += scnprintf(*buf + pos, bufsz - pos,
" %34s: 0X%08x\n",
get_fh_string(fh_tbl[i]),
iwl_read_direct32(trans, fh_tbl[i]));
- }
+
return pos;
}
#endif
+
IWL_ERR(trans, "FH register values:\n");
- for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
+ for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
IWL_ERR(trans, " %34s: 0X%08x\n",
get_fh_string(fh_tbl[i]),
iwl_read_direct32(trans, fh_tbl[i]));
- }
+
return 0;
}
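After the refactor, the buf pointer itself selects the behaviour that the removed bool used to: NULL dumps the FH registers to the kernel log (error path), non-NULL allocates a buffer, formats into it and returns the byte count (debugfs path). The two call shapes (sketch):

        char *buf = NULL;
        int pos;

        iwl_dump_fh(trans, NULL);       /* error path: registers go to the log */

        pos = iwl_dump_fh(trans, &buf); /* debugfs: *buf is kmalloc'd, pos = bytes used */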
@@ -1982,11 +1984,11 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
size_t count, loff_t *ppos)
{
struct iwl_trans *trans = file->private_data;
- char *buf;
+ char *buf = NULL;
int pos = 0;
ssize_t ret = -EFAULT;
- ret = pos = iwl_dump_fh(trans, &buf, true);
+ ret = pos = iwl_dump_fh(trans, &buf);
if (buf) {
ret = simple_read_from_buffer(user_buf,
count, ppos, buf, pos);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 30899901aef5..650f79a1f2bd 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -57,8 +57,7 @@
static const struct ethtool_ops xennet_ethtool_ops;
struct netfront_cb {
- struct page *page;
- unsigned offset;
+ int pull_to;
};
#define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb))
@@ -867,15 +866,9 @@ static int handle_incoming_queue(struct net_device *dev,
struct sk_buff *skb;
while ((skb = __skb_dequeue(rxq)) != NULL) {
- struct page *page = NETFRONT_SKB_CB(skb)->page;
- void *vaddr = page_address(page);
- unsigned offset = NETFRONT_SKB_CB(skb)->offset;
-
- memcpy(skb->data, vaddr + offset,
- skb_headlen(skb));
+ int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
- if (page != skb_frag_page(&skb_shinfo(skb)->frags[0]))
- __free_page(page);
+ __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
/* Ethernet work: Delayed to here as it peeks the header. */
skb->protocol = eth_type_trans(skb, dev);
@@ -913,7 +906,6 @@ static int xennet_poll(struct napi_struct *napi, int budget)
struct sk_buff_head errq;
struct sk_buff_head tmpq;
unsigned long flags;
- unsigned int len;
int err;
spin_lock(&np->rx_lock);
@@ -955,24 +947,13 @@ err:
}
}
- NETFRONT_SKB_CB(skb)->page =
- skb_frag_page(&skb_shinfo(skb)->frags[0]);
- NETFRONT_SKB_CB(skb)->offset = rx->offset;
-
- len = rx->status;
- if (len > RX_COPY_THRESHOLD)
- len = RX_COPY_THRESHOLD;
- skb_put(skb, len);
+ NETFRONT_SKB_CB(skb)->pull_to = rx->status;
+ if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
+ NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;
- if (rx->status > len) {
- skb_shinfo(skb)->frags[0].page_offset =
- rx->offset + len;
- skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status - len);
- skb->data_len = rx->status - len;
- } else {
- __skb_fill_page_desc(skb, 0, NULL, 0, 0);
- skb_shinfo(skb)->nr_frags = 0;
- }
+ skb_shinfo(skb)->frags[0].page_offset = rx->offset;
+ skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
+ skb->data_len = rx->status;
i = xennet_fill_frags(np, skb, &tmpq);
@@ -999,7 +980,7 @@ err:
* receive throughput using the standard receive
* buffer size was cut by 25%(!!!).
*/
- skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len);
+ skb->truesize += skb->data_len - RX_COPY_THRESHOLD;
skb->len += skb->data_len;
if (rx->flags & XEN_NETRXF_csum_blank)
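The receive-path rewrite trades copy-and-maybe-free for a pull: the whole payload now stays in frag 0, the skb control block only records how many bytes belong in the linear area, and __pskb_pull_tail() moves exactly that much once the skb is off the shared ring. In outline (simplified sketch, not the full path):

        /* on RX completion: */
        NETFRONT_SKB_CB(skb)->pull_to = min_t(int, rx->status, RX_COPY_THRESHOLD);
        skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
        skb->data_len = rx->status;

        /* later, in handle_incoming_queue(): */
        __pskb_pull_tail(skb, NETFRONT_SKB_CB(skb)->pull_to - skb_headlen(skb));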
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index 831868904e02..1dd61f402b04 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -58,6 +58,7 @@ struct sam9_rtc {
struct rtc_device *rtcdev;
u32 imr;
void __iomem *gpbr;
+ int irq;
};
#define rtt_readl(rtc, field) \
@@ -292,7 +293,7 @@ static int __devinit at91_rtc_probe(struct platform_device *pdev)
{
struct resource *r, *r_gpbr;
struct sam9_rtc *rtc;
- int ret;
+ int ret, irq;
u32 mr;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -302,10 +303,18 @@ static int __devinit at91_rtc_probe(struct platform_device *pdev)
return -ENODEV;
}
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "failed to get interrupt resource\n");
+ return irq;
+ }
+
rtc = kzalloc(sizeof *rtc, GFP_KERNEL);
if (!rtc)
return -ENOMEM;
+ rtc->irq = irq;
+
/* platform setup code should have handled this; sigh */
if (!device_can_wakeup(&pdev->dev))
device_init_wakeup(&pdev->dev, 1);
@@ -345,11 +354,10 @@ static int __devinit at91_rtc_probe(struct platform_device *pdev)
}
/* register irq handler after we know what name we'll use */
- ret = request_irq(AT91_ID_SYS, at91_rtc_interrupt,
- IRQF_SHARED,
+ ret = request_irq(rtc->irq, at91_rtc_interrupt, IRQF_SHARED,
dev_name(&rtc->rtcdev->dev), rtc);
if (ret) {
- dev_dbg(&pdev->dev, "can't share IRQ %d?\n", AT91_ID_SYS);
+ dev_dbg(&pdev->dev, "can't share IRQ %d?\n", rtc->irq);
rtc_device_unregister(rtc->rtcdev);
goto fail_register;
}
@@ -386,7 +394,7 @@ static int __devexit at91_rtc_remove(struct platform_device *pdev)
/* disable all interrupts */
rtt_writel(rtc, MR, mr & ~(AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN));
- free_irq(AT91_ID_SYS, rtc);
+ free_irq(rtc->irq, rtc);
rtc_device_unregister(rtc->rtcdev);
@@ -423,7 +431,7 @@ static int at91_rtc_suspend(struct platform_device *pdev,
rtc->imr = mr & (AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN);
if (rtc->imr) {
if (device_may_wakeup(&pdev->dev) && (mr & AT91_RTT_ALMIEN)) {
- enable_irq_wake(AT91_ID_SYS);
+ enable_irq_wake(rtc->irq);
/* don't let RTTINC cause wakeups */
if (mr & AT91_RTT_RTTINCIEN)
rtt_writel(rtc, MR, mr & ~AT91_RTT_RTTINCIEN);
@@ -441,7 +449,7 @@ static int at91_rtc_resume(struct platform_device *pdev)
if (rtc->imr) {
if (device_may_wakeup(&pdev->dev))
- disable_irq_wake(AT91_ID_SYS);
+ disable_irq_wake(rtc->irq);
mr = rtt_readl(rtc, MR);
rtt_writel(rtc, MR, mr | rtc->imr);
}
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 40a826a7295f..2fb2b9ea97ec 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -3804,7 +3804,7 @@ dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
case BIODASDSYMMIO:
return dasd_symm_io(device, argp);
default:
- return -ENOIOCTLCMD;
+ return -ENOTTY;
}
}
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index cceae70279f6..654c6921a6d4 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -498,12 +498,9 @@ int dasd_ioctl(struct block_device *bdev, fmode_t mode,
break;
default:
/* if the discipline has an ioctl method try it. */
- if (base->discipline->ioctl) {
+ rc = -ENOTTY;
+ if (base->discipline->ioctl)
rc = base->discipline->ioctl(block, cmd, argp);
- if (rc == -ENOIOCTLCMD)
- rc = -EINVAL;
- } else
- rc = -EINVAL;
}
dasd_put_device(base);
return rc;
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 76db75e836ed..e58cd7d2fce4 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -543,14 +543,6 @@ static void iblock_complete_cmd(struct se_cmd *cmd)
kfree(ibr);
}
-static void iblock_bio_destructor(struct bio *bio)
-{
- struct se_cmd *cmd = bio->bi_private;
- struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
-
- bio_free(bio, ib_dev->ibd_bio_set);
-}
-
static struct bio *
iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
{
@@ -572,7 +564,6 @@ iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
bio->bi_bdev = ib_dev->ibd_bd;
bio->bi_private = cmd;
- bio->bi_destructor = iblock_bio_destructor;
bio->bi_end_io = &iblock_bio_done;
bio->bi_sector = lba;
return bio;
diff --git a/drivers/video/auo_k190x.c b/drivers/video/auo_k190x.c
index 77da6a2f43dc..c03ecdd31e4c 100644
--- a/drivers/video/auo_k190x.c
+++ b/drivers/video/auo_k190x.c
@@ -987,7 +987,6 @@ err_regfb:
fb_dealloc_cmap(&info->cmap);
err_cmap:
fb_deferred_io_cleanup(info);
- kfree(info->fbdefio);
err_defio:
vfree((void *)info->screen_base);
err_irq:
@@ -1022,7 +1021,6 @@ int __devexit auok190x_common_remove(struct platform_device *pdev)
fb_dealloc_cmap(&info->cmap);
fb_deferred_io_cleanup(info);
- kfree(info->fbdefio);
vfree((void *)info->screen_base);
diff --git a/drivers/video/console/bitblit.c b/drivers/video/console/bitblit.c
index 28b1a834906b..61b182bf32a2 100644
--- a/drivers/video/console/bitblit.c
+++ b/drivers/video/console/bitblit.c
@@ -162,7 +162,7 @@ static void bit_putcs(struct vc_data *vc, struct fb_info *info,
image.depth = 1;
if (attribute) {
- buf = kmalloc(cellsize, GFP_KERNEL);
+ buf = kmalloc(cellsize, GFP_ATOMIC);
if (!buf)
return;
}
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 88e92041d8f0..fdefa8fd72c4 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -449,7 +449,7 @@ static int __init fb_console_setup(char *this_opt)
while ((options = strsep(&this_opt, ",")) != NULL) {
if (!strncmp(options, "font:", 5))
- strcpy(fontname, options + 5);
+ strlcpy(fontname, options + 5, sizeof(fontname));
if (!strncmp(options, "scrollback:", 11)) {
options += 11;
diff --git a/drivers/video/mb862xx/mb862xxfbdrv.c b/drivers/video/mb862xx/mb862xxfbdrv.c
index 00ce1f34b496..57d940be5f3d 100644
--- a/drivers/video/mb862xx/mb862xxfbdrv.c
+++ b/drivers/video/mb862xx/mb862xxfbdrv.c
@@ -328,6 +328,8 @@ static int mb862xxfb_ioctl(struct fb_info *fbi, unsigned int cmd,
case MB862XX_L1_SET_CFG:
if (copy_from_user(l1_cfg, argp, sizeof(*l1_cfg)))
return -EFAULT;
+ if (l1_cfg->dh == 0 || l1_cfg->dw == 0)
+ return -EINVAL;
if ((l1_cfg->sw >= l1_cfg->dw) && (l1_cfg->sh >= l1_cfg->dh)) {
/* downscaling */
outreg(cap, GC_CAP_CSC,
diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c
index 5d31699fbd3c..f43bfe17b3b6 100644
--- a/drivers/video/omap2/dss/sdi.c
+++ b/drivers/video/omap2/dss/sdi.c
@@ -105,6 +105,20 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
sdi_config_lcd_manager(dssdev);
+ /*
+ * LCLK and PCLK divisors are located in shadow registers, and we
+ * normally write them to DISPC registers when enabling the output.
+ * However, SDI uses pck-free as source clock for its PLL, and pck-free
+ * is affected by the divisors. And as we need the PLL before enabling
+ * the output, we need to write the divisors early.
+ *
+ * It seems just writing to the DISPC register is enough, and we don't
+ * need to care about the shadow register mechanism for pck-free. The
+ * exact reason for this is unknown.
+ */
+ dispc_mgr_set_clock_div(dssdev->manager->id,
+ &sdi.mgr_config.clock_info);
+
dss_sdi_init(dssdev->phy.sdi.datapairs);
r = dss_sdi_enable();
if (r)
diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c
index 08ec1a7103f2..fc671d3d8004 100644
--- a/drivers/video/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/omap2/omapfb/omapfb-main.c
@@ -1192,7 +1192,7 @@ static int _setcolreg(struct fb_info *fbi, u_int regno, u_int red, u_int green,
break;
if (regno < 16) {
- u16 pal;
+ u32 pal;
pal = ((red >> (16 - var->red.length)) <<
var->red.offset) |
((green >> (16 - var->green.length)) <<
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index e85c04b9f61c..a3f28f331b2b 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -70,23 +70,25 @@ static inline int use_bip_pool(unsigned int idx)
}
/**
- * bio_integrity_alloc_bioset - Allocate integrity payload and attach it to bio
+ * bio_integrity_alloc - Allocate integrity payload and attach it to bio
* @bio: bio to attach integrity metadata to
* @gfp_mask: Memory allocation mask
* @nr_vecs: Number of integrity metadata scatter-gather elements
- * @bs: bio_set to allocate from
*
* Description: This function prepares a bio for attaching integrity
* metadata. nr_vecs specifies the maximum number of pages containing
* integrity metadata that can be attached.
*/
-struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *bio,
- gfp_t gfp_mask,
- unsigned int nr_vecs,
- struct bio_set *bs)
+struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
+ gfp_t gfp_mask,
+ unsigned int nr_vecs)
{
struct bio_integrity_payload *bip;
unsigned int idx = vecs_to_idx(nr_vecs);
+ struct bio_set *bs = bio->bi_pool;
+
+ if (!bs)
+ bs = fs_bio_set;
BUG_ON(bio == NULL);
bip = NULL;
@@ -114,37 +116,22 @@ struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *bio,
return bip;
}
-EXPORT_SYMBOL(bio_integrity_alloc_bioset);
-
-/**
- * bio_integrity_alloc - Allocate integrity payload and attach it to bio
- * @bio: bio to attach integrity metadata to
- * @gfp_mask: Memory allocation mask
- * @nr_vecs: Number of integrity metadata scatter-gather elements
- *
- * Description: This function prepares a bio for attaching integrity
- * metadata. nr_vecs specifies the maximum number of pages containing
- * integrity metadata that can be attached.
- */
-struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
- gfp_t gfp_mask,
- unsigned int nr_vecs)
-{
- return bio_integrity_alloc_bioset(bio, gfp_mask, nr_vecs, fs_bio_set);
-}
EXPORT_SYMBOL(bio_integrity_alloc);
/**
* bio_integrity_free - Free bio integrity payload
* @bio: bio containing bip to be freed
- * @bs: bio_set this bio was allocated from
*
* Description: Used to free the integrity portion of a bio. Usually
* called from bio_free().
*/
-void bio_integrity_free(struct bio *bio, struct bio_set *bs)
+void bio_integrity_free(struct bio *bio)
{
struct bio_integrity_payload *bip = bio->bi_integrity;
+ struct bio_set *bs = bio->bi_pool;
+
+ if (!bs)
+ bs = fs_bio_set;
BUG_ON(bip == NULL);
@@ -730,19 +717,18 @@ EXPORT_SYMBOL(bio_integrity_split);
* @bio: New bio
* @bio_src: Original bio
* @gfp_mask: Memory allocation mask
- * @bs: bio_set to allocate bip from
*
* Description: Called to allocate a bip when cloning a bio
*/
int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
- gfp_t gfp_mask, struct bio_set *bs)
+ gfp_t gfp_mask)
{
struct bio_integrity_payload *bip_src = bio_src->bi_integrity;
struct bio_integrity_payload *bip;
BUG_ON(bip_src == NULL);
- bip = bio_integrity_alloc_bioset(bio, gfp_mask, bip_src->bip_vcnt, bs);
+ bip = bio_integrity_alloc(bio, gfp_mask, bip_src->bip_vcnt);
if (bip == NULL)
return -EIO;
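The common thread in the bio-integrity hunks: the owning bio_set now travels with the bio in bio->bi_pool, so alloc, free and clone no longer need a bio_set argument. The fallback mirrors the old kmalloc destructor (sketch):

        struct bio_set *bs = bio->bi_pool;

        if (!bs)
                bs = fs_bio_set;        /* kmalloc'd bios use the shared set's bip pools */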
diff --git a/fs/bio.c b/fs/bio.c
index 71072ab99128..f855e0e1869c 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -55,6 +55,7 @@ static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
* IO code that does not need private memory pools.
*/
struct bio_set *fs_bio_set;
+EXPORT_SYMBOL(fs_bio_set);
/*
* Our slab pool management
@@ -233,26 +234,37 @@ fallback:
return bvl;
}
-void bio_free(struct bio *bio, struct bio_set *bs)
+static void __bio_free(struct bio *bio)
{
+ bio_disassociate_task(bio);
+
+ if (bio_integrity(bio))
+ bio_integrity_free(bio);
+}
+
+static void bio_free(struct bio *bio)
+{
+ struct bio_set *bs = bio->bi_pool;
void *p;
- if (bio_has_allocated_vec(bio))
- bvec_free_bs(bs, bio->bi_io_vec, BIO_POOL_IDX(bio));
+ __bio_free(bio);
- if (bio_integrity(bio))
- bio_integrity_free(bio, bs);
+ if (bs) {
+ if (bio_has_allocated_vec(bio))
+ bvec_free_bs(bs, bio->bi_io_vec, BIO_POOL_IDX(bio));
- /*
- * If we have front padding, adjust the bio pointer before freeing
- */
- p = bio;
- if (bs->front_pad)
+ /*
+ * If we have front padding, adjust the bio pointer before freeing
+ */
+ p = bio;
p -= bs->front_pad;
- mempool_free(p, bs->bio_pool);
+ mempool_free(p, bs->bio_pool);
+ } else {
+ /* Bio was allocated by bio_kmalloc() */
+ kfree(bio);
+ }
}
-EXPORT_SYMBOL(bio_free);
void bio_init(struct bio *bio)
{
@@ -263,48 +275,85 @@ void bio_init(struct bio *bio)
EXPORT_SYMBOL(bio_init);
/**
+ * bio_reset - reinitialize a bio
+ * @bio: bio to reset
+ *
+ * Description:
+ * After calling bio_reset(), @bio will be in the same state as a freshly
+ * allocated bio returned by bio_alloc_bioset() - the only fields that are
+ * preserved are the ones that are initialized by bio_alloc_bioset(). See
+ * comment in struct bio.
+ */
+void bio_reset(struct bio *bio)
+{
+ unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);
+
+ __bio_free(bio);
+
+ memset(bio, 0, BIO_RESET_BYTES);
+ bio->bi_flags = flags|(1 << BIO_UPTODATE);
+}
+EXPORT_SYMBOL(bio_reset);
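bio_reset() exists so a caller can recycle a single bio across submissions instead of freeing and reallocating it. A hedged usage sketch — submit_and_wait() is an illustrative helper, not a kernel API, and bs/bdev are assumed to exist:

        struct bio *bio = bio_alloc_bioset(GFP_NOIO, 1, bs);

        bio->bi_bdev = bdev;
        submit_and_wait(bio);

        bio_reset(bio);                 /* fields before bi_max_vecs are cleared;
                                         * bi_pool and the vec array survive */
        bio->bi_bdev = bdev;            /* re-fill what bio_reset() wiped */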
+
+/**
* bio_alloc_bioset - allocate a bio for I/O
* @gfp_mask: the GFP_ mask given to the slab allocator
* @nr_iovecs: number of iovecs to pre-allocate
* @bs: the bio_set to allocate from.
*
* Description:
- * bio_alloc_bioset will try its own mempool to satisfy the allocation.
- * If %__GFP_WAIT is set then we will block on the internal pool waiting
- * for a &struct bio to become free.
+ * If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
+ * backed by the @bs's mempool.
*
- * Note that the caller must set ->bi_destructor on successful return
- * of a bio, to do the appropriate freeing of the bio once the reference
- * count drops to zero.
- **/
+ * When @bs is not NULL, if %__GFP_WAIT is set then bio_alloc will always be
+ * able to allocate a bio. This is due to the mempool guarantees. To make this
+ * work, callers must never allocate more than 1 bio at a time from this pool.
+ * Callers that need to allocate more than 1 bio must always submit the
+ * previously allocated bio for IO before attempting to allocate a new one.
+ * Failure to do so can cause deadlocks under memory pressure.
+ *
+ * RETURNS:
+ * Pointer to new bio on success, NULL on failure.
+ */
struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
{
+ unsigned front_pad;
+ unsigned inline_vecs;
unsigned long idx = BIO_POOL_NONE;
struct bio_vec *bvl = NULL;
struct bio *bio;
void *p;
- p = mempool_alloc(bs->bio_pool, gfp_mask);
+ if (!bs) {
+ if (nr_iovecs > UIO_MAXIOV)
+ return NULL;
+
+ p = kmalloc(sizeof(struct bio) +
+ nr_iovecs * sizeof(struct bio_vec),
+ gfp_mask);
+ front_pad = 0;
+ inline_vecs = nr_iovecs;
+ } else {
+ p = mempool_alloc(bs->bio_pool, gfp_mask);
+ front_pad = bs->front_pad;
+ inline_vecs = BIO_INLINE_VECS;
+ }
+
if (unlikely(!p))
return NULL;
- bio = p + bs->front_pad;
+ bio = p + front_pad;
bio_init(bio);
- if (unlikely(!nr_iovecs))
- goto out_set;
-
- if (nr_iovecs <= BIO_INLINE_VECS) {
- bvl = bio->bi_inline_vecs;
- nr_iovecs = BIO_INLINE_VECS;
- } else {
+ if (nr_iovecs > inline_vecs) {
bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs);
if (unlikely(!bvl))
goto err_free;
-
- nr_iovecs = bvec_nr_vecs(idx);
+ } else if (nr_iovecs) {
+ bvl = bio->bi_inline_vecs;
}
-out_set:
+
+ bio->bi_pool = bs;
bio->bi_flags |= idx << BIO_POOL_OFFSET;
bio->bi_max_vecs = nr_iovecs;
bio->bi_io_vec = bvl;
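The deadlock rule from the comment above, restated as code: a __GFP_WAIT mempool allocation only reliably makes progress if every bio previously taken from the pool has been submitted, since completion is what refills the pool. Sketch:

        struct bio *a, *b;

        a = bio_alloc_bioset(GFP_NOIO, 1, bs);  /* may sleep on the mempool, won't fail */
        submit_bio(WRITE, a);                   /* must be in flight before the next alloc */
        b = bio_alloc_bioset(GFP_NOIO, 1, bs);  /* safe: completing 'a' can refill the pool */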
@@ -316,80 +365,6 @@ err_free:
}
EXPORT_SYMBOL(bio_alloc_bioset);
-static void bio_fs_destructor(struct bio *bio)
-{
- bio_free(bio, fs_bio_set);
-}
-
-/**
- * bio_alloc - allocate a new bio, memory pool backed
- * @gfp_mask: allocation mask to use
- * @nr_iovecs: number of iovecs
- *
- * bio_alloc will allocate a bio and associated bio_vec array that can hold
- * at least @nr_iovecs entries. Allocations will be done from the
- * fs_bio_set. Also see @bio_alloc_bioset and @bio_kmalloc.
- *
- * If %__GFP_WAIT is set, then bio_alloc will always be able to allocate
- * a bio. This is due to the mempool guarantees. To make this work, callers
- * must never allocate more than 1 bio at a time from this pool. Callers
- * that need to allocate more than 1 bio must always submit the previously
- * allocated bio for IO before attempting to allocate a new one. Failure to
- * do so can cause livelocks under memory pressure.
- *
- * RETURNS:
- * Pointer to new bio on success, NULL on failure.
- */
-struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
-{
- struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
-
- if (bio)
- bio->bi_destructor = bio_fs_destructor;
-
- return bio;
-}
-EXPORT_SYMBOL(bio_alloc);
-
-static void bio_kmalloc_destructor(struct bio *bio)
-{
- if (bio_integrity(bio))
- bio_integrity_free(bio, fs_bio_set);
- kfree(bio);
-}
-
-/**
- * bio_kmalloc - allocate a bio for I/O using kmalloc()
- * @gfp_mask: the GFP_ mask given to the slab allocator
- * @nr_iovecs: number of iovecs to pre-allocate
- *
- * Description:
- * Allocate a new bio with @nr_iovecs bvecs. If @gfp_mask contains
- * %__GFP_WAIT, the allocation is guaranteed to succeed.
- *
- **/
-struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
-{
- struct bio *bio;
-
- if (nr_iovecs > UIO_MAXIOV)
- return NULL;
-
- bio = kmalloc(sizeof(struct bio) + nr_iovecs * sizeof(struct bio_vec),
- gfp_mask);
- if (unlikely(!bio))
- return NULL;
-
- bio_init(bio);
- bio->bi_flags |= BIO_POOL_NONE << BIO_POOL_OFFSET;
- bio->bi_max_vecs = nr_iovecs;
- bio->bi_io_vec = bio->bi_inline_vecs;
- bio->bi_destructor = bio_kmalloc_destructor;
-
- return bio;
-}
-EXPORT_SYMBOL(bio_kmalloc);
-
void zero_fill_bio(struct bio *bio)
{
unsigned long flags;
@@ -420,11 +395,8 @@ void bio_put(struct bio *bio)
/*
* last put frees it
*/
- if (atomic_dec_and_test(&bio->bi_cnt)) {
- bio_disassociate_task(bio);
- bio->bi_next = NULL;
- bio->bi_destructor(bio);
- }
+ if (atomic_dec_and_test(&bio->bi_cnt))
+ bio_free(bio);
}
EXPORT_SYMBOL(bio_put);
@@ -466,26 +438,28 @@ void __bio_clone(struct bio *bio, struct bio *bio_src)
EXPORT_SYMBOL(__bio_clone);
/**
- * bio_clone - clone a bio
+ * bio_clone_bioset - clone a bio
* @bio: bio to clone
* @gfp_mask: allocation priority
+ * @bs: bio_set to allocate from
*
* Like __bio_clone, only also allocates the returned bio
*/
-struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
+struct bio *bio_clone_bioset(struct bio *bio, gfp_t gfp_mask,
+ struct bio_set *bs)
{
- struct bio *b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, fs_bio_set);
+ struct bio *b;
+ b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, bs);
if (!b)
return NULL;
- b->bi_destructor = bio_fs_destructor;
__bio_clone(b, bio);
if (bio_integrity(bio)) {
int ret;
- ret = bio_integrity_clone(b, bio, gfp_mask, fs_bio_set);
+ ret = bio_integrity_clone(b, bio, gfp_mask);
if (ret < 0) {
bio_put(b);
@@ -495,7 +469,7 @@ struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
return b;
}
-EXPORT_SYMBOL(bio_clone);
+EXPORT_SYMBOL(bio_clone_bioset);
/**
* bio_get_nr_vecs - return approx number of vecs
@@ -1513,9 +1487,12 @@ struct bio_pair *bio_split(struct bio *bi, int first_sectors)
bp->bv1 = bi->bi_io_vec[0];
bp->bv2 = bi->bi_io_vec[0];
- bp->bv2.bv_offset += first_sectors << 9;
- bp->bv2.bv_len -= first_sectors << 9;
- bp->bv1.bv_len = first_sectors << 9;
+
+ if (bio_is_rw(bi)) {
+ bp->bv2.bv_offset += first_sectors << 9;
+ bp->bv2.bv_len -= first_sectors << 9;
+ bp->bv1.bv_len = first_sectors << 9;
+ }
bp->bio1.bi_io_vec = &bp->bv1;
bp->bio2.bi_io_vec = &bp->bv2;
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 38e721b35d45..37967bcea05c 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -116,6 +116,8 @@ EXPORT_SYMBOL(invalidate_bdev);
int set_blocksize(struct block_device *bdev, int size)
{
+ struct address_space *mapping;
+
/* Size must be a power of two, and between 512 and PAGE_SIZE */
if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size))
return -EINVAL;
@@ -124,6 +126,20 @@ int set_blocksize(struct block_device *bdev, int size)
if (size < bdev_logical_block_size(bdev))
return -EINVAL;
+ /* Prevent starting I/O or mapping the device */
+ percpu_down_write(&bdev->bd_block_size_semaphore);
+
+ /* Check that the block device is not memory mapped */
+ mapping = bdev->bd_inode->i_mapping;
+ mutex_lock(&mapping->i_mmap_mutex);
+ if (!prio_tree_empty(&mapping->i_mmap) ||
+ !list_empty(&mapping->i_mmap_nonlinear)) {
+ mutex_unlock(&mapping->i_mmap_mutex);
+ percpu_up_write(&bdev->bd_block_size_semaphore);
+ return -EBUSY;
+ }
+ mutex_unlock(&mapping->i_mmap_mutex);
+
/* Don't change the size if it is same as current */
if (bdev->bd_block_size != size) {
sync_blockdev(bdev);
@@ -131,6 +147,9 @@ int set_blocksize(struct block_device *bdev, int size)
bdev->bd_inode->i_blkbits = blksize_bits(size);
kill_bdev(bdev);
}
+
+ percpu_up_write(&bdev->bd_block_size_semaphore);
+
return 0;
}
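The locking shape introduced here keeps the hot paths on the cheap side of the per-CPU rwsem: reads, writes and mmaps of the block device take it for reading (no shared cache-line traffic), while the rare set_blocksize() takes it for writing and so drains in-flight I/O before the size changes. In outline (placeholders illustrative):

        /* hot paths: blkdev_aio_read(), blkdev_aio_write(), blkdev_mmap() */
        percpu_down_read(&bdev->bd_block_size_semaphore);
        do_block_io();
        percpu_up_read(&bdev->bd_block_size_semaphore);

        /* rare path: set_blocksize() */
        percpu_down_write(&bdev->bd_block_size_semaphore);      /* waits out all readers */
        change_block_size();
        percpu_up_write(&bdev->bd_block_size_semaphore);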
@@ -441,6 +460,12 @@ static struct inode *bdev_alloc_inode(struct super_block *sb)
struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL);
if (!ei)
return NULL;
+
+ if (unlikely(percpu_init_rwsem(&ei->bdev.bd_block_size_semaphore))) {
+ kmem_cache_free(bdev_cachep, ei);
+ return NULL;
+ }
+
return &ei->vfs_inode;
}
@@ -449,6 +474,8 @@ static void bdev_i_callback(struct rcu_head *head)
struct inode *inode = container_of(head, struct inode, i_rcu);
struct bdev_inode *bdi = BDEV_I(inode);
+ percpu_free_rwsem(&bdi->bdev.bd_block_size_semaphore);
+
kmem_cache_free(bdev_cachep, bdi);
}
@@ -1567,6 +1594,22 @@ static long block_ioctl(struct file *file, unsigned cmd, unsigned long arg)
return blkdev_ioctl(bdev, mode, cmd, arg);
}
+ssize_t blkdev_aio_read(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos)
+{
+ ssize_t ret;
+ struct block_device *bdev = I_BDEV(iocb->ki_filp->f_mapping->host);
+
+ percpu_down_read(&bdev->bd_block_size_semaphore);
+
+ ret = generic_file_aio_read(iocb, iov, nr_segs, pos);
+
+ percpu_up_read(&bdev->bd_block_size_semaphore);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(blkdev_aio_read);
+
/*
* Write data to the block device. Only intended for the block device itself
* and the raw driver which basically is a fake block device.
@@ -1578,12 +1621,16 @@ ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
struct file *file = iocb->ki_filp;
+ struct block_device *bdev = I_BDEV(file->f_mapping->host);
struct blk_plug plug;
ssize_t ret;
BUG_ON(iocb->ki_pos != pos);
blk_start_plug(&plug);
+
+ percpu_down_read(&bdev->bd_block_size_semaphore);
+
ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
if (ret > 0 || ret == -EIOCBQUEUED) {
ssize_t err;
@@ -1592,11 +1639,29 @@ ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
if (err < 0 && ret > 0)
ret = err;
}
+
+ percpu_up_read(&bdev->bd_block_size_semaphore);
+
blk_finish_plug(&plug);
+
return ret;
}
EXPORT_SYMBOL_GPL(blkdev_aio_write);
+static int blkdev_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ int ret;
+ struct block_device *bdev = I_BDEV(file->f_mapping->host);
+
+ percpu_down_read(&bdev->bd_block_size_semaphore);
+
+ ret = generic_file_mmap(file, vma);
+
+ percpu_up_read(&bdev->bd_block_size_semaphore);
+
+ return ret;
+}
+
/*
* Try to release a page associated with block device when the system
* is under memory pressure.
@@ -1627,9 +1692,9 @@ const struct file_operations def_blk_fops = {
.llseek = block_llseek,
.read = do_sync_read,
.write = do_sync_write,
- .aio_read = generic_file_aio_read,
+ .aio_read = blkdev_aio_read,
.aio_write = blkdev_aio_write,
- .mmap = generic_file_mmap,
+ .mmap = blkdev_mmap,
.fsync = blkdev_fsync,
.unlocked_ioctl = block_ioctl,
#ifdef CONFIG_COMPAT
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 074923ce593d..f0cf934ba877 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1576,9 +1576,14 @@ cifs_readv_callback(struct mid_q_entry *mid)
/* result already set, check signature */
if (server->sec_mode &
(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
- if (cifs_verify_signature(rdata->iov, rdata->nr_iov,
- server, mid->sequence_number + 1))
- cERROR(1, "Unexpected SMB signature");
+ int rc = 0;
+
+ rc = cifs_verify_signature(rdata->iov, rdata->nr_iov,
+ server,
+ mid->sequence_number + 1);
+ if (rc)
+ cERROR(1, "SMB signature verification returned "
+ "error = %d", rc);
}
/* FIXME: should this be counted toward the initiating task? */
task_io_account_read(rdata->bytes);
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index cbe709ad6663..781025be48bc 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -356,19 +356,12 @@ cifs_create_get_file_info:
cifs_create_set_dentry:
if (rc != 0) {
cFYI(1, "Create worked, get_inode_info failed rc = %d", rc);
+ CIFSSMBClose(xid, tcon, *fileHandle);
goto out;
}
d_drop(direntry);
d_add(direntry, newinode);
- /* ENOENT for create? How weird... */
- rc = -ENOENT;
- if (!newinode) {
- CIFSSMBClose(xid, tcon, *fileHandle);
- goto out;
- }
- rc = 0;
-
out:
kfree(buf);
kfree(full_path);
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 7354877fa3bd..cb79c7edecb0 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -124,10 +124,10 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
{
struct cifsInodeInfo *cifs_i = CIFS_I(inode);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
- unsigned long oldtime = cifs_i->time;
cifs_revalidate_cache(inode, fattr);
+ spin_lock(&inode->i_lock);
inode->i_atime = fattr->cf_atime;
inode->i_mtime = fattr->cf_mtime;
inode->i_ctime = fattr->cf_ctime;
@@ -148,9 +148,6 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
else
cifs_i->time = jiffies;
- cFYI(1, "inode 0x%p old_time=%ld new_time=%ld", inode,
- oldtime, cifs_i->time);
-
cifs_i->delete_pending = fattr->cf_flags & CIFS_FATTR_DELETE_PENDING;
cifs_i->server_eof = fattr->cf_eof;
@@ -158,7 +155,6 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
* Can't safely change the file size here if the client is writing to
* it due to potential races.
*/
- spin_lock(&inode->i_lock);
if (is_size_safe_to_change(cifs_i, fattr->cf_eof)) {
i_size_write(inode, fattr->cf_eof);
@@ -859,12 +855,14 @@ struct inode *cifs_root_iget(struct super_block *sb)
if (rc && tcon->ipc) {
cFYI(1, "ipc connection - fake read inode");
+ spin_lock(&inode->i_lock);
inode->i_mode |= S_IFDIR;
set_nlink(inode, 2);
inode->i_op = &cifs_ipc_inode_ops;
inode->i_fop = &simple_dir_operations;
inode->i_uid = cifs_sb->mnt_uid;
inode->i_gid = cifs_sb->mnt_gid;
+ spin_unlock(&inode->i_lock);
} else if (rc) {
iget_failed(inode);
inode = ERR_PTR(rc);
@@ -1110,6 +1108,15 @@ undo_setattr:
goto out_close;
}
+/* copied from fs/nfs/dir.c with small changes */
+static void
+cifs_drop_nlink(struct inode *inode)
+{
+ spin_lock(&inode->i_lock);
+ if (inode->i_nlink > 0)
+ drop_nlink(inode);
+ spin_unlock(&inode->i_lock);
+}
/*
* If dentry->d_inode is null (usually meaning the cached dentry
@@ -1166,13 +1173,13 @@ retry_std_delete:
psx_del_no_retry:
if (!rc) {
if (inode)
- drop_nlink(inode);
+ cifs_drop_nlink(inode);
} else if (rc == -ENOENT) {
d_drop(dentry);
} else if (rc == -ETXTBSY) {
rc = cifs_rename_pending_delete(full_path, dentry, xid);
if (rc == 0)
- drop_nlink(inode);
+ cifs_drop_nlink(inode);
} else if ((rc == -EACCES) && (dosattr == 0) && inode) {
attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
if (attrs == NULL) {
@@ -1241,9 +1248,10 @@ cifs_mkdir_qinfo(struct inode *inode, struct dentry *dentry, umode_t mode,
* setting nlink not necessary except in cases where we failed to get it
* from the server or was set bogus
*/
+ spin_lock(&dentry->d_inode->i_lock);
if ((dentry->d_inode) && (dentry->d_inode->i_nlink < 2))
set_nlink(dentry->d_inode, 2);
-
+ spin_unlock(&dentry->d_inode->i_lock);
mode &= ~current_umask();
/* must turn on setgid bit if parent dir has it */
if (inode->i_mode & S_ISGID)
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index 09e4b3ae4564..e6ce3b112875 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -433,7 +433,9 @@ cifs_hardlink(struct dentry *old_file, struct inode *inode,
if (old_file->d_inode) {
cifsInode = CIFS_I(old_file->d_inode);
if (rc == 0) {
+ spin_lock(&old_file->d_inode->i_lock);
inc_nlink(old_file->d_inode);
+ spin_unlock(&old_file->d_inode->i_lock);
/* BB should we make this contingent on superblock flag NOATIME? */
/* old_file->d_inode->i_ctime = CURRENT_TIME;*/
/* parent dir timestamps will update from srv
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index a4ff5d547554..e4d3b9964167 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -52,7 +52,8 @@ check_smb2_hdr(struct smb2_hdr *hdr, __u64 mid)
cERROR(1, "Bad protocol string signature header %x",
*(unsigned int *) hdr->ProtocolId);
if (mid != hdr->MessageId)
- cERROR(1, "Mids do not match");
+ cERROR(1, "Mids do not match: %llu and %llu", mid,
+ hdr->MessageId);
}
cERROR(1, "Bad SMB detected. The Mid=%llu", hdr->MessageId);
return 1;
@@ -107,7 +108,7 @@ smb2_check_message(char *buf, unsigned int length)
* ie Validate the wct via smb2_struct_sizes table above
*/
- if (length < 2 + sizeof(struct smb2_hdr)) {
+ if (length < sizeof(struct smb2_pdu)) {
if ((length >= sizeof(struct smb2_hdr)) && (hdr->Status != 0)) {
pdu->StructureSize2 = 0;
/*
@@ -121,15 +122,15 @@ smb2_check_message(char *buf, unsigned int length)
return 1;
}
if (len > CIFSMaxBufSize + MAX_SMB2_HDR_SIZE - 4) {
- cERROR(1, "SMB length greater than maximum, mid=%lld", mid);
+ cERROR(1, "SMB length greater than maximum, mid=%llu", mid);
return 1;
}
if (check_smb2_hdr(hdr, mid))
return 1;
- if (hdr->StructureSize != SMB2_HEADER_SIZE) {
- cERROR(1, "Illegal structure size %d",
+ if (hdr->StructureSize != SMB2_HEADER_STRUCTURE_SIZE) {
+ cERROR(1, "Illegal structure size %u",
le16_to_cpu(hdr->StructureSize));
return 1;
}
@@ -161,8 +162,9 @@ smb2_check_message(char *buf, unsigned int length)
if (4 + len != clc_len) {
cFYI(1, "Calculated size %u length %u mismatch mid %llu",
clc_len, 4 + len, mid);
- if (clc_len == 4 + len + 1) /* BB FIXME (fix samba) */
- return 0; /* BB workaround Samba 3 bug SessSetup rsp */
+ /* server can return one byte more */
+ if (clc_len == 4 + len + 1)
+ return 0;
return 1;
}
return 0;
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index f37a1b41b402..c5fbfac5d576 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -87,10 +87,6 @@
#define SMB2_PROTO_NUMBER __constant_cpu_to_le32(0x424d53fe)
-#define SMB2_HEADER_SIZE __constant_le16_to_cpu(64)
-
-#define SMB2_ERROR_STRUCTURE_SIZE2 __constant_le16_to_cpu(9)
-
/*
* SMB2 Header Definition
*
@@ -99,6 +95,9 @@
* "PDU" : "Protocol Data Unit" (ie a network "frame")
*
*/
+
+#define SMB2_HEADER_STRUCTURE_SIZE __constant_le16_to_cpu(64)
+
struct smb2_hdr {
__be32 smb2_buf_length; /* big endian on wire */
/* length is only two or three bytes - with
@@ -140,6 +139,9 @@ struct smb2_pdu {
* command code name for the struct. Note that structures must be packed.
*
*/
+
+#define SMB2_ERROR_STRUCTURE_SIZE2 __constant_le16_to_cpu(9)
+
struct smb2_err_rsp {
struct smb2_hdr hdr;
__le16 StructureSize;
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 83867ef348df..d9b639b95fa8 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -503,13 +503,16 @@ cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
/* convert the length into a more usable form */
if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
struct kvec iov;
+ int rc = 0;
iov.iov_base = mid->resp_buf;
iov.iov_len = len;
/* FIXME: add code to kill session */
- if (cifs_verify_signature(&iov, 1, server,
- mid->sequence_number + 1) != 0)
- cERROR(1, "Unexpected SMB signature");
+ rc = cifs_verify_signature(&iov, 1, server,
+ mid->sequence_number + 1);
+ if (rc)
+ cERROR(1, "SMB signature verification returned error = "
+ "%d", rc);
}
/* BB special case reconnect tid and uid here? */
diff --git a/fs/exofs/ore.c b/fs/exofs/ore.c
index 1585db1aa365..f936cb50dc0d 100644
--- a/fs/exofs/ore.c
+++ b/fs/exofs/ore.c
@@ -814,8 +814,8 @@ static int _write_mirror(struct ore_io_state *ios, int cur_comp)
struct bio *bio;
if (per_dev != master_dev) {
- bio = bio_kmalloc(GFP_KERNEL,
- master_dev->bio->bi_max_vecs);
+ bio = bio_clone_kmalloc(master_dev->bio,
+ GFP_KERNEL);
if (unlikely(!bio)) {
ORE_DBGMSG(
"Failed to allocate BIO size=%u\n",
@@ -824,7 +824,6 @@ static int _write_mirror(struct ore_io_state *ios, int cur_comp)
goto out;
}
- __bio_clone(bio, master_dev->bio);
bio->bi_bdev = NULL;
bio->bi_next = NULL;
per_dev->offset = master_dev->offset;
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 26435890dc87..820e7aaad4fd 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -212,20 +212,41 @@ extern void bio_pair_release(struct bio_pair *dbio);
extern struct bio_set *bioset_create(unsigned int, unsigned int);
extern void bioset_free(struct bio_set *);
-extern struct bio *bio_alloc(gfp_t, unsigned int);
-extern struct bio *bio_kmalloc(gfp_t, unsigned int);
extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
extern void bio_put(struct bio *);
-extern void bio_free(struct bio *, struct bio_set *);
+
+extern void __bio_clone(struct bio *, struct bio *);
+extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);
+
+extern struct bio_set *fs_bio_set;
+
+static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
+{
+ return bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
+}
+
+static inline struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
+{
+ return bio_clone_bioset(bio, gfp_mask, fs_bio_set);
+}
+
+static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
+{
+ return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
+}
+
+static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
+{
+ return bio_clone_bioset(bio, gfp_mask, NULL);
+}
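With the bioset argument now explicit in every allocation and clone path, a stacking driver can pass its own reserve pool instead of fs_bio_set. A minimal sketch, assuming a hypothetical driver-private bioset called my_bioset:

/* Sketch: clone from a private bioset so the allocation cannot
 * starve or deadlock against fs_bio_set under memory pressure. */
struct bio *clone = bio_clone_bioset(bio, GFP_NOIO, my_bioset);
if (!clone)
	return -ENOMEM;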
extern void bio_endio(struct bio *, int);
struct request_queue;
extern int bio_phys_segments(struct request_queue *, struct bio *);
-extern void __bio_clone(struct bio *, struct bio *);
-extern struct bio *bio_clone(struct bio *, gfp_t);
-
extern void bio_init(struct bio *);
+extern void bio_reset(struct bio *);
extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
@@ -304,8 +325,6 @@ struct biovec_slab {
struct kmem_cache *slab;
};
-extern struct bio_set *fs_bio_set;
-
/*
* a small number of entries is fine, not going to be performance critical.
* basically we just need to survive
@@ -367,9 +386,31 @@ static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
/*
* Check whether this bio carries any data or not. A NULL bio is allowed.
*/
-static inline int bio_has_data(struct bio *bio)
+static inline bool bio_has_data(struct bio *bio)
{
- return bio && bio->bi_io_vec != NULL;
+ if (bio && bio->bi_vcnt)
+ return true;
+
+ return false;
+}
+
+static inline bool bio_is_rw(struct bio *bio)
+{
+ if (!bio_has_data(bio))
+ return false;
+
+ if (bio->bi_rw & REQ_WRITE_SAME)
+ return false;
+
+ return true;
+}
+
+static inline bool bio_mergeable(struct bio *bio)
+{
+ if (bio->bi_rw & REQ_NOMERGE_FLAGS)
+ return false;
+
+ return true;
}
/*
@@ -505,9 +546,8 @@ static inline struct bio *bio_list_get(struct bio_list *bl)
#define bio_integrity(bio) (bio->bi_integrity != NULL)
-extern struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *, gfp_t, unsigned int, struct bio_set *);
extern struct bio_integrity_payload *bio_integrity_alloc(struct bio *, gfp_t, unsigned int);
-extern void bio_integrity_free(struct bio *, struct bio_set *);
+extern void bio_integrity_free(struct bio *);
extern int bio_integrity_add_page(struct bio *, struct page *, unsigned int, unsigned int);
extern int bio_integrity_enabled(struct bio *bio);
extern int bio_integrity_set_tag(struct bio *, void *, unsigned int);
@@ -517,7 +557,7 @@ extern void bio_integrity_endio(struct bio *, int);
extern void bio_integrity_advance(struct bio *, unsigned int);
extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
extern void bio_integrity_split(struct bio *, struct bio_pair *, int);
-extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t, struct bio_set *);
+extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
extern int bioset_integrity_create(struct bio_set *, int);
extern void bioset_integrity_free(struct bio_set *);
extern void bio_integrity_init(void);
@@ -549,13 +589,13 @@ static inline int bio_integrity_prep(struct bio *bio)
return 0;
}
-static inline void bio_integrity_free(struct bio *bio, struct bio_set *bs)
+static inline void bio_integrity_free(struct bio *bio)
{
return;
}
static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
- gfp_t gfp_mask, struct bio_set *bs)
+ gfp_t gfp_mask)
{
return 0;
}
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 7b7ac9ccec7a..cdf11191e645 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -59,12 +59,6 @@ struct bio {
unsigned int bi_seg_front_size;
unsigned int bi_seg_back_size;
- unsigned int bi_max_vecs; /* max bvl_vecs we can hold */
-
- atomic_t bi_cnt; /* pin count */
-
- struct bio_vec *bi_io_vec; /* the actual vec list */
-
bio_end_io_t *bi_end_io;
void *bi_private;
@@ -80,7 +74,17 @@ struct bio {
struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
- bio_destructor_t *bi_destructor; /* destructor */
+ /*
+ * Everything starting with bi_max_vecs will be preserved by bio_reset()
+ */
+
+ unsigned int bi_max_vecs; /* max bvl_vecs we can hold */
+
+ atomic_t bi_cnt; /* pin count */
+
+ struct bio_vec *bi_io_vec; /* the actual vec list */
+
+ struct bio_set *bi_pool;
/*
* We can inline a number of vecs at the end of the bio, to avoid
@@ -90,6 +94,8 @@ struct bio {
struct bio_vec bi_inline_vecs[0];
};
+#define BIO_RESET_BYTES offsetof(struct bio, bi_max_vecs)
+
/*
* bio flags
*/
@@ -105,6 +111,13 @@ struct bio {
#define BIO_FS_INTEGRITY 9 /* fs owns integrity data, not block layer */
#define BIO_QUIET 10 /* Make BIO Quiet */
#define BIO_MAPPED_INTEGRITY 11/* integrity metadata has been remapped */
+
+/*
+ * Flags starting here get preserved by bio_reset() - this includes
+ * BIO_POOL_IDX()
+ */
+#define BIO_RESET_BITS 12
+
#define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag)))
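Given BIO_RESET_BYTES and BIO_RESET_BITS, the newly exported bio_reset() can be read as wiping the front of the struct while keeping the allocation-related tail and the high flag bits. A plausible sketch of the body, assuming the usual BIO_UPTODATE convention (not necessarily the verbatim fs/bio.c code):

void bio_reset(struct bio *bio)
{
	/* keep only the flags that survive a reset (pool index etc.) */
	unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);

	memset(bio, 0, BIO_RESET_BYTES);	/* everything before bi_max_vecs */
	bio->bi_flags = flags | (1 << BIO_UPTODATE);
}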
/*
@@ -134,6 +147,7 @@ enum rq_flag_bits {
__REQ_PRIO, /* boost priority in cfq */
__REQ_DISCARD, /* request to discard sectors */
__REQ_SECURE, /* secure discard (used with __REQ_DISCARD) */
+ __REQ_WRITE_SAME, /* write same block many times */
__REQ_NOIDLE, /* don't anticipate more IO after this one */
__REQ_FUA, /* forced unit access */
@@ -172,15 +186,21 @@ enum rq_flag_bits {
#define REQ_META (1 << __REQ_META)
#define REQ_PRIO (1 << __REQ_PRIO)
#define REQ_DISCARD (1 << __REQ_DISCARD)
+#define REQ_WRITE_SAME (1 << __REQ_WRITE_SAME)
#define REQ_NOIDLE (1 << __REQ_NOIDLE)
#define REQ_FAILFAST_MASK \
(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
#define REQ_COMMON_MASK \
(REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_PRIO | \
- REQ_DISCARD | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | REQ_SECURE)
+ REQ_DISCARD | REQ_WRITE_SAME | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | \
+ REQ_SECURE)
#define REQ_CLONE_MASK REQ_COMMON_MASK
+/* This mask is used for both bio and request merge checking */
+#define REQ_NOMERGE_FLAGS \
+ (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA)
+
#define REQ_RAHEAD (1 << __REQ_RAHEAD)
#define REQ_THROTTLED (1 << __REQ_THROTTLED)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 4a2ab7c85393..1756001210d2 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -270,6 +270,7 @@ struct queue_limits {
unsigned int io_min;
unsigned int io_opt;
unsigned int max_discard_sectors;
+ unsigned int max_write_same_sectors;
unsigned int discard_granularity;
unsigned int discard_alignment;
@@ -540,8 +541,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
#define blk_account_rq(rq) \
(((rq)->cmd_flags & REQ_STARTED) && \
- ((rq)->cmd_type == REQ_TYPE_FS || \
- ((rq)->cmd_flags & REQ_DISCARD)))
+ ((rq)->cmd_type == REQ_TYPE_FS))
#define blk_pm_request(rq) \
((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \
@@ -595,17 +595,39 @@ static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
rl->flags &= ~flag;
}
+static inline bool rq_mergeable(struct request *rq)
+{
+ if (rq->cmd_type != REQ_TYPE_FS)
+ return false;
-/*
- * mergeable request must not have _NOMERGE or _BARRIER bit set, nor may
- * it already be started by driver.
- */
-#define RQ_NOMERGE_FLAGS \
- (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA | REQ_DISCARD)
-#define rq_mergeable(rq) \
- (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
- (((rq)->cmd_flags & REQ_DISCARD) || \
- (rq)->cmd_type == REQ_TYPE_FS))
+ if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
+ return false;
+
+ return true;
+}
+
+static inline bool blk_check_merge_flags(unsigned int flags1,
+ unsigned int flags2)
+{
+ if ((flags1 & REQ_DISCARD) != (flags2 & REQ_DISCARD))
+ return false;
+
+ if ((flags1 & REQ_SECURE) != (flags2 & REQ_SECURE))
+ return false;
+
+ if ((flags1 & REQ_WRITE_SAME) != (flags2 & REQ_WRITE_SAME))
+ return false;
+
+ return true;
+}
+
+static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
+{
+ if (bio_data(a) == bio_data(b))
+ return true;
+
+ return false;
+}
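A merge path is expected to combine these helpers roughly as below; this is a sketch of the intended call order, not a verbatim excerpt from block/blk-merge.c:

/* Sketch: gate request merging on the new helpers. */
static bool can_merge(struct request *req, struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return false;

	/* DISCARD/SECURE/WRITE SAME requests merge only with their own kind */
	if (!blk_check_merge_flags(req->cmd_flags, next->cmd_flags))
		return false;

	/* WRITE SAME additionally requires an identical payload page */
	if ((req->cmd_flags & REQ_WRITE_SAME) &&
	    !blk_write_same_mergeable(req->bio, next->bio))
		return false;

	return true;
}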
/*
* q->prep_rq_fn return values
@@ -802,6 +824,28 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
return blk_rq_cur_bytes(rq) >> 9;
}
+static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
+ unsigned int cmd_flags)
+{
+ if (unlikely(cmd_flags & REQ_DISCARD))
+ return q->limits.max_discard_sectors;
+
+ if (unlikely(cmd_flags & REQ_WRITE_SAME))
+ return q->limits.max_write_same_sectors;
+
+ return q->limits.max_sectors;
+}
+
+static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
+{
+ struct request_queue *q = rq->q;
+
+ if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC))
+ return q->limits.max_hw_sectors;
+
+ return blk_queue_get_max_sectors(q, rq->cmd_flags);
+}
+
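A back-merge check can then compare against the per-command limit rather than one queue-wide maximum; a sketch of the expected caller:

/* Sketch: refuse a back merge that would exceed this command's limit. */
if (blk_rq_sectors(req) + bio_sectors(bio) > blk_rq_get_max_sectors(req)) {
	req->cmd_flags |= REQ_NOMERGE;
	return 0;
}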
/*
* Request issue related functions.
*/
@@ -857,6 +901,8 @@ extern void blk_queue_max_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
unsigned int max_discard_sectors);
+extern void blk_queue_max_write_same_sectors(struct request_queue *q,
+ unsigned int max_write_same_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_alignment_offset(struct request_queue *q,
@@ -987,6 +1033,8 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
+extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask, struct page *page);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask);
static inline int sb_issue_discard(struct super_block *sb, sector_t block,
@@ -1164,6 +1212,16 @@ static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
return queue_discard_zeroes_data(bdev_get_queue(bdev));
}
+static inline unsigned int bdev_write_same(struct block_device *bdev)
+{
+ struct request_queue *q = bdev_get_queue(bdev);
+
+ if (q)
+ return q->limits.max_write_same_sectors;
+
+ return 0;
+}
+
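The return value doubles as a capability probe and a limit; a sketch, where zero_by_writing() stands in for a hypothetical manual fallback:

/* Sketch: prefer WRITE SAME for zeroing, fall back to plain writes. */
if (bdev_write_same(bdev))
	err = blkdev_issue_write_same(bdev, sector, nr_sects,
				      GFP_NOFS, ZERO_PAGE(0));
else
	err = zero_by_writing(bdev, sector, nr_sects);	/* hypothetical */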
static inline int queue_dma_alignment(struct request_queue *q)
{
return q ? q->dma_alignment : 511;
diff --git a/include/linux/drbd.h b/include/linux/drbd.h
index 47e3d4850584..94f58a102bbb 100644
--- a/include/linux/drbd.h
+++ b/include/linux/drbd.h
@@ -53,10 +53,10 @@
extern const char *drbd_buildtag(void);
-#define REL_VERSION "8.3.13"
+#define REL_VERSION "8.3.14"
#define API_VERSION 88
#define PRO_VERSION_MIN 86
-#define PRO_VERSION_MAX 96
+#define PRO_VERSION_MAX 97
enum drbd_io_error_p {
diff --git a/include/linux/drbd_nl.h b/include/linux/drbd_nl.h
index a8706f08ab36..f6a576df19e0 100644
--- a/include/linux/drbd_nl.h
+++ b/include/linux/drbd_nl.h
@@ -145,6 +145,7 @@ NL_PACKET(dump_ee, 24,
NL_PACKET(start_ov, 25,
NL_INT64( 66, T_MAY_IGNORE, start_sector)
+ NL_INT64( 90, T_MANDATORY, stop_sector)
)
NL_PACKET(new_c_uuid, 26,
diff --git a/include/linux/fs.h b/include/linux/fs.h
index aa110476a95b..cefa9645fc85 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -335,6 +335,7 @@ struct inodes_stat_t {
#define BLKDISCARDZEROES _IO(0x12,124)
#define BLKSECDISCARD _IO(0x12,125)
#define BLKROTATIONAL _IO(0x12,126)
+#define BLKZEROOUT _IO(0x12,127)
#define BMAP_IOCTL 1 /* obsolete - kept for compatibility */
#define FIBMAP _IO(0x00,1) /* bmap access */
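From user space the new ioctl takes a {start, length} pair of 64-bit byte values; a minimal sketch, assuming the handler wired up in block/ioctl.c forwards to blkdev_issue_zeroout():

/* Sketch: zero the first 1 MiB of a block device via BLKZEROOUT. */
#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>

static int zero_first_mib(const char *dev)
{
	uint64_t range[2] = { 0, 1024 * 1024 };	/* start, length in bytes */
	int fd = open(dev, O_WRONLY);
	int ret;

	if (fd < 0)
		return -1;
	ret = ioctl(fd, BLKZEROOUT, &range);
	close(fd);
	return ret;
}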
@@ -415,6 +416,7 @@ struct inodes_stat_t {
#include <linux/migrate_mode.h>
#include <linux/uidgid.h>
#include <linux/lockdep.h>
+#include <linux/percpu-rwsem.h>
#include <asm/byteorder.h>
@@ -724,6 +726,8 @@ struct block_device {
int bd_fsfreeze_count;
/* Mutex for freeze */
struct mutex bd_fsfreeze_mutex;
+ /* A semaphore that prevents I/O while block size is being changed */
+ struct percpu_rw_semaphore bd_block_size_semaphore;
};
/*
@@ -2564,6 +2568,8 @@ extern int generic_segment_checks(const struct iovec *iov,
unsigned long *nr_segs, size_t *count, int access_flags);
/* fs/block_dev.c */
+extern ssize_t blkdev_aio_read(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos);
extern ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos);
extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 604382143bcf..594b419b7d20 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -82,10 +82,18 @@
__x - (__x % (y)); \
} \
)
+
+/*
+ * Divide positive or negative dividend by positive divisor and round
+ * to closest integer. Result is undefined for negative divisors.
+ */
#define DIV_ROUND_CLOSEST(x, divisor)( \
{ \
- typeof(divisor) __divisor = divisor; \
- (((x) + ((__divisor) / 2)) / (__divisor)); \
+ typeof(x) __x = x; \
+ typeof(divisor) __d = divisor; \
+ (((typeof(x))-1) >= 0 || (__x) >= 0) ? \
+ (((__x) + ((__d) / 2)) / (__d)) : \
+ (((__x) - ((__d) / 2)) / (__d)); \
} \
)
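The behavioural change only shows up for negative dividends, where the old form let C's truncating division bias the result toward zero. For example:

DIV_ROUND_CLOSEST(7, 2);	/* (7 + 1) / 2  ==  4 */
DIV_ROUND_CLOSEST(-7, 2);	/* (-7 - 1) / 2 == -4; the old macro gave (-7 + 1) / 2 == -3 */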
diff --git a/include/linux/loop.h b/include/linux/loop.h
index 11a41a8f08eb..9635116dd830 100644
--- a/include/linux/loop.h
+++ b/include/linux/loop.h
@@ -44,7 +44,7 @@ struct loop_device {
int lo_encrypt_key_size;
struct loop_func_table *lo_encryption;
__u32 lo_init[2];
- uid_t lo_key_owner; /* Who set the key */
+ kuid_t lo_key_owner; /* Who set the key */
int (*ioctl)(struct loop_device *, int cmd,
unsigned long arg);
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 111aca5e97f3..4b27f9f503e4 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -239,6 +239,7 @@ struct mmc_card {
#define MMC_QUIRK_BLK_NO_CMD23 (1<<7) /* Avoid CMD23 for regular multiblock */
#define MMC_QUIRK_BROKEN_BYTE_MODE_512 (1<<8) /* Avoid sending 512 bytes in */
#define MMC_QUIRK_LONG_READ_TIME (1<<9) /* Data read time > CSD says */
+#define MMC_QUIRK_SEC_ERASE_TRIM_BROKEN (1<<10) /* Skip secure for erase/trim */
/* byte mode */
unsigned int poweroff_notify_state; /* eMMC4.5 notify feature */
#define MMC_NO_POWER_NOTIFICATION 0
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index fc3526077348..6b4565c440c8 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2149,7 +2149,7 @@
#define PCI_DEVICE_ID_TIGON3_5704S 0x16a8
#define PCI_DEVICE_ID_NX2_57800_VF 0x16a9
#define PCI_DEVICE_ID_NX2_5706S 0x16aa
-#define PCI_DEVICE_ID_NX2_57840_MF 0x16ab
+#define PCI_DEVICE_ID_NX2_57840_MF 0x16a4
#define PCI_DEVICE_ID_NX2_5708S 0x16ac
#define PCI_DEVICE_ID_NX2_57840_VF 0x16ad
#define PCI_DEVICE_ID_NX2_57810_MF 0x16ae
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
new file mode 100644
index 000000000000..cf80f7e5277f
--- /dev/null
+++ b/include/linux/percpu-rwsem.h
@@ -0,0 +1,89 @@
+#ifndef _LINUX_PERCPU_RWSEM_H
+#define _LINUX_PERCPU_RWSEM_H
+
+#include <linux/mutex.h>
+#include <linux/percpu.h>
+#include <linux/rcupdate.h>
+#include <linux/delay.h>
+
+struct percpu_rw_semaphore {
+ unsigned __percpu *counters;
+ bool locked;
+ struct mutex mtx;
+};
+
+static inline void percpu_down_read(struct percpu_rw_semaphore *p)
+{
+ rcu_read_lock();
+ if (unlikely(p->locked)) {
+ rcu_read_unlock();
+ mutex_lock(&p->mtx);
+ this_cpu_inc(*p->counters);
+ mutex_unlock(&p->mtx);
+ return;
+ }
+ this_cpu_inc(*p->counters);
+ rcu_read_unlock();
+}
+
+static inline void percpu_up_read(struct percpu_rw_semaphore *p)
+{
+ /*
+ * On X86, write operation in this_cpu_dec serves as a memory unlock
+ * barrier (i.e. memory accesses may be moved before the write, but
+ * no memory accesses are moved past the write).
+ * On other architectures this may not be the case, so we need smp_mb()
+ * there.
+ */
+#if defined(CONFIG_X86) && (!defined(CONFIG_X86_PPRO_FENCE) && !defined(CONFIG_X86_OOSTORE))
+ barrier();
+#else
+ smp_mb();
+#endif
+ this_cpu_dec(*p->counters);
+}
+
+static inline unsigned __percpu_count(unsigned __percpu *counters)
+{
+ unsigned total = 0;
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ total += ACCESS_ONCE(*per_cpu_ptr(counters, cpu));
+
+ return total;
+}
+
+static inline void percpu_down_write(struct percpu_rw_semaphore *p)
+{
+ mutex_lock(&p->mtx);
+ p->locked = true;
+ synchronize_rcu();
+ while (__percpu_count(p->counters))
+ msleep(1);
+ smp_rmb(); /* paired with smp_mb() in percpu_up_read() */
+}
+
+static inline void percpu_up_write(struct percpu_rw_semaphore *p)
+{
+ p->locked = false;
+ mutex_unlock(&p->mtx);
+}
+
+static inline int percpu_init_rwsem(struct percpu_rw_semaphore *p)
+{
+ p->counters = alloc_percpu(unsigned);
+ if (unlikely(!p->counters))
+ return -ENOMEM;
+ p->locked = false;
+ mutex_init(&p->mtx);
+ return 0;
+}
+
+static inline void percpu_free_rwsem(struct percpu_rw_semaphore *p)
+{
+ free_percpu(p->counters);
+ p->counters = NULL; /* catch use after free bugs */
+}
+
+#endif
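Taken together, the primitives above give nearly free reader entry (a per-CPU increment under RCU) with a mutex-serialised writer that drains readers before proceeding. A minimal usage sketch, mirroring how fs/block_dev.c wraps bd_block_size_semaphore (my_dev is hypothetical):

struct my_dev {
	struct percpu_rw_semaphore rwsem;
	unsigned int block_size;
};

static int my_dev_init(struct my_dev *d)
{
	return percpu_init_rwsem(&d->rwsem);	/* -ENOMEM on failure */
}

static void my_dev_io(struct my_dev *d)
{
	percpu_down_read(&d->rwsem);	/* fast path: per-CPU inc under RCU */
	/* ... I/O that relies on d->block_size staying stable ... */
	percpu_up_read(&d->rwsem);
}

static void my_dev_set_block_size(struct my_dev *d, unsigned int size)
{
	percpu_down_write(&d->rwsem);	/* waits for all readers to drain */
	d->block_size = size;
	percpu_up_write(&d->rwsem);
}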
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 7b600da9a635..4bd6c06eb28e 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -201,6 +201,7 @@ static inline void *sg_virt(struct scatterlist *sg)
return page_address(sg_page(sg)) + sg->offset;
}
+int sg_nents(struct scatterlist *sg);
struct scatterlist *sg_next(struct scatterlist *);
struct scatterlist *sg_last(struct scatterlist *s, unsigned int);
void sg_init_table(struct scatterlist *, unsigned int);
diff --git a/include/linux/time.h b/include/linux/time.h
index b0bbd8f0130d..b51e664c83e7 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -125,6 +125,13 @@ static inline bool timespec_valid(const struct timespec *ts)
/* Can't have more nanoseconds than a second */
if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
return false;
+ return true;
+}
+
+static inline bool timespec_valid_strict(const struct timespec *ts)
+{
+ if (!timespec_valid(ts))
+ return false;
/* Disallow values that could overflow ktime_t */
if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX)
return false;
diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
index e1ce1048fe5f..4a045cda9c60 100644
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -18,6 +18,7 @@ struct nf_conntrack_ecache {
u16 ctmask; /* bitmask of ct events to be delivered */
u16 expmask; /* bitmask of expect events to be delivered */
u32 pid; /* netlink pid of destroyer */
+ struct timer_list timeout;
};
static inline struct nf_conntrack_ecache *
diff --git a/init/Kconfig b/init/Kconfig
index af6c7f8ba019..75633d3db5c9 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1023,7 +1023,6 @@ config UIDGID_CONVERTED
depends on AIRO_CS = n
depends on TUN = n
depends on INFINIBAND_QIB = n
- depends on BLK_DEV_LOOP = n
depends on ANDROID_BINDER_IPC = n
# Security modules
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 0c1485e42be6..34e5eac81424 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -428,7 +428,7 @@ int do_settimeofday(const struct timespec *tv)
struct timespec ts_delta, xt;
unsigned long flags;
- if (!timespec_valid(tv))
+ if (!timespec_valid_strict(tv))
return -EINVAL;
write_seqlock_irqsave(&tk->lock, flags);
@@ -476,7 +476,7 @@ int timekeeping_inject_offset(struct timespec *ts)
/* Make sure the proposed value is valid */
tmp = timespec_add(tk_xtime(tk), *ts);
- if (!timespec_valid(&tmp)) {
+ if (!timespec_valid_strict(&tmp)) {
ret = -EINVAL;
goto error;
}
@@ -659,7 +659,7 @@ void __init timekeeping_init(void)
struct timespec now, boot, tmp;
read_persistent_clock(&now);
- if (!timespec_valid(&now)) {
+ if (!timespec_valid_strict(&now)) {
pr_warn("WARNING: Persistent clock returned invalid value!\n"
" Check your CMOS/BIOS settings.\n");
now.tv_sec = 0;
@@ -667,7 +667,7 @@ void __init timekeeping_init(void)
}
read_boot_clock(&boot);
- if (!timespec_valid(&boot)) {
+ if (!timespec_valid_strict(&boot)) {
pr_warn("WARNING: Boot clock returned invalid value!\n"
" Check your CMOS/BIOS settings.\n");
boot.tv_sec = 0;
@@ -713,7 +713,7 @@ static struct timespec timekeeping_suspend_time;
static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
struct timespec *delta)
{
- if (!timespec_valid(delta)) {
+ if (!timespec_valid_strict(delta)) {
printk(KERN_WARNING "__timekeeping_inject_sleeptime: Invalid "
"sleep delta value!\n");
return;
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index fadae774a20c..1bf60efb5e02 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -39,6 +39,28 @@ struct scatterlist *sg_next(struct scatterlist *sg)
EXPORT_SYMBOL(sg_next);
/**
+ * sg_nents - return total count of entries in scatterlist
+ * @sg: The scatterlist
+ *
+ * Description:
+ * Allows one to know how many entries are in sg, taking into account
+ * chaining as well
+ *
+ **/
+int sg_nents(struct scatterlist *sg)
+{
+ int nents = 0;
+ while (sg) {
+ nents++;
+ sg = sg_next(sg);
+ }
+
+ return nents;
+}
+EXPORT_SYMBOL(sg_nents);
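Because sg_nents() simply walks sg_next(), it counts across chained tables and stops at the end marker; a quick usage sketch:

/* Sketch: count the entries of a (possibly chained) scatterlist. */
struct scatterlist sgl[4];

sg_init_table(sgl, 4);			/* marks sgl[3] as the end */
/* ... sg_set_buf()/sg_set_page() on each entry ... */
WARN_ON(sg_nents(sgl) != 4);		/* sg_next() returns NULL past the end */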
+
+
+/**
* sg_last - return the last scatterlist entry in a list
* @sgl: First entry in the scatterlist
* @nents: Number of entries in the scatterlist
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index bd92431d4c49..4ada3be6e252 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2562,7 +2562,7 @@ int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
break;
default:
- BUG();
+ return -EINVAL;
}
l = strlen(policy_modes[mode]);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 346b1eb83a1f..e4ba3e70c174 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -168,24 +168,16 @@ static void poll_napi(struct net_device *dev)
struct napi_struct *napi;
int budget = 16;
- WARN_ON_ONCE(!irqs_disabled());
-
list_for_each_entry(napi, &dev->napi_list, dev_list) {
- local_irq_enable();
if (napi->poll_owner != smp_processor_id() &&
spin_trylock(&napi->poll_lock)) {
- rcu_read_lock_bh();
budget = poll_one_napi(rcu_dereference_bh(dev->npinfo),
napi, budget);
- rcu_read_unlock_bh();
spin_unlock(&napi->poll_lock);
- if (!budget) {
- local_irq_disable();
+ if (!budget)
break;
- }
}
- local_irq_disable();
}
}
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 8eec8f4a0536..ebdf06f938bf 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -124,6 +124,8 @@ static DEFINE_SPINLOCK(mfc_unres_lock);
static struct kmem_cache *mrt_cachep __read_mostly;
static struct mr_table *ipmr_new_table(struct net *net, u32 id);
+static void ipmr_free_table(struct mr_table *mrt);
+
static int ip_mr_forward(struct net *net, struct mr_table *mrt,
struct sk_buff *skb, struct mfc_cache *cache,
int local);
@@ -131,6 +133,7 @@ static int ipmr_cache_report(struct mr_table *mrt,
struct sk_buff *pkt, vifi_t vifi, int assert);
static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
struct mfc_cache *c, struct rtmsg *rtm);
+static void mroute_clean_tables(struct mr_table *mrt);
static void ipmr_expire_process(unsigned long arg);
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
@@ -271,7 +274,7 @@ static void __net_exit ipmr_rules_exit(struct net *net)
list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) {
list_del(&mrt->list);
- kfree(mrt);
+ ipmr_free_table(mrt);
}
fib_rules_unregister(net->ipv4.mr_rules_ops);
}
@@ -299,7 +302,7 @@ static int __net_init ipmr_rules_init(struct net *net)
static void __net_exit ipmr_rules_exit(struct net *net)
{
- kfree(net->ipv4.mrt);
+ ipmr_free_table(net->ipv4.mrt);
}
#endif
@@ -336,6 +339,13 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
return mrt;
}
+static void ipmr_free_table(struct mr_table *mrt)
+{
+ del_timer_sync(&mrt->ipmr_expire_timer);
+ mroute_clean_tables(mrt);
+ kfree(mrt);
+}
+
/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */
static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c
index 4ad9cf173992..9c87cde28ff8 100644
--- a/net/ipv4/netfilter/nf_nat_sip.c
+++ b/net/ipv4/netfilter/nf_nat_sip.c
@@ -502,7 +502,10 @@ static unsigned int ip_nat_sdp_media(struct sk_buff *skb, unsigned int dataoff,
ret = nf_ct_expect_related(rtcp_exp);
if (ret == 0)
break;
- else if (ret != -EBUSY) {
+ else if (ret == -EBUSY) {
+ nf_ct_unexpect_related(rtp_exp);
+ continue;
+ } else if (ret < 0) {
nf_ct_unexpect_related(rtp_exp);
port = 0;
break;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index fd9ecb52c66b..82cf2a722b23 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -934,12 +934,14 @@ static u32 __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
if (mtu < ip_rt_min_pmtu)
mtu = ip_rt_min_pmtu;
+ rcu_read_lock();
if (fib_lookup(dev_net(rt->dst.dev), fl4, &res) == 0) {
struct fib_nh *nh = &FIB_RES_NH(res);
update_or_create_fnhe(nh, fl4->daddr, 0, mtu,
jiffies + ip_rt_mtu_expires);
}
+ rcu_read_unlock();
return mtu;
}
@@ -956,7 +958,7 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
dst->obsolete = DST_OBSOLETE_KILL;
} else {
rt->rt_pmtu = mtu;
- dst_set_expires(&rt->dst, ip_rt_mtu_expires);
+ rt->dst.expires = max(1UL, jiffies + ip_rt_mtu_expires);
}
}
@@ -1263,7 +1265,7 @@ static void ipv4_dst_destroy(struct dst_entry *dst)
{
struct rtable *rt = (struct rtable *) dst;
- if (dst->flags & DST_NOCACHE) {
+ if (!list_empty(&rt->rt_uncached)) {
spin_lock_bh(&rt_uncached_lock);
list_del(&rt->rt_uncached);
spin_unlock_bh(&rt_uncached_lock);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 85308b90df80..6e38c6c23caa 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2926,13 +2926,14 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
* tcp_xmit_retransmit_queue().
*/
static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
- int newly_acked_sacked, bool is_dupack,
+ int prior_sacked, bool is_dupack,
int flag)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
(tcp_fackets_out(tp) > tp->reordering));
+ int newly_acked_sacked = 0;
int fast_rexmit = 0;
if (WARN_ON(!tp->packets_out && tp->sacked_out))
@@ -2992,6 +2993,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
tcp_add_reno_sack(sk);
} else
do_lost = tcp_try_undo_partial(sk, pkts_acked);
+ newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked;
break;
case TCP_CA_Loss:
if (flag & FLAG_DATA_ACKED)
@@ -3013,6 +3015,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
if (is_dupack)
tcp_add_reno_sack(sk);
}
+ newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked;
if (icsk->icsk_ca_state <= TCP_CA_Disorder)
tcp_try_undo_dsack(sk);
@@ -3590,7 +3593,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
int prior_packets;
int prior_sacked = tp->sacked_out;
int pkts_acked = 0;
- int newly_acked_sacked = 0;
bool frto_cwnd = false;
/* If the ack is older than previous acks
@@ -3666,8 +3668,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una);
pkts_acked = prior_packets - tp->packets_out;
- newly_acked_sacked = (prior_packets - prior_sacked) -
- (tp->packets_out - tp->sacked_out);
if (tp->frto_counter)
frto_cwnd = tcp_process_frto(sk, flag);
@@ -3681,7 +3681,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
tcp_may_raise_cwnd(sk, flag))
tcp_cong_avoid(sk, ack, prior_in_flight);
is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
- tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked,
+ tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
is_dupack, flag);
} else {
if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
@@ -3698,7 +3698,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
no_queue:
/* If data was DSACKed, see if we can undo a cwnd reduction. */
if (flag & FLAG_DSACKING_ACK)
- tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked,
+ tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
is_dupack, flag);
/* If this ack opens up a zero window, clear backoff. It was
* being used to time the probes, and is probably far higher than
@@ -3718,8 +3718,7 @@ old_ack:
*/
if (TCP_SKB_CB(skb)->sacked) {
flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
- newly_acked_sacked = tp->sacked_out - prior_sacked;
- tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked,
+ tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
is_dupack, flag);
}
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 6dc7fd353ef5..282f3723ee19 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -167,8 +167,6 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
struct esp_data *esp = x->data;
/* skb is pure payload to encrypt */
- err = -ENOMEM;
-
aead = esp->aead;
alen = crypto_aead_authsize(aead);
@@ -203,8 +201,10 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
}
tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
- if (!tmp)
+ if (!tmp) {
+ err = -ENOMEM;
goto error;
+ }
seqhi = esp_tmp_seqhi(tmp);
iv = esp_tmp_iv(aead, tmp, seqhilen);
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 393355d37b47..513cab08a986 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1347,11 +1347,10 @@ static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
/* Remove from tunnel list */
spin_lock_bh(&pn->l2tp_tunnel_list_lock);
list_del_rcu(&tunnel->list);
+ kfree_rcu(tunnel, rcu);
spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
- synchronize_rcu();
atomic_dec(&l2tp_tunnel_count);
- kfree(tunnel);
}
/* Create a socket for the tunnel, if one isn't set up by
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index a38ec6cdeee1..56d583e083a7 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -163,6 +163,7 @@ struct l2tp_tunnel_cfg {
struct l2tp_tunnel {
int magic; /* Should be L2TP_TUNNEL_MAGIC */
+ struct rcu_head rcu;
rwlock_t hlist_lock; /* protect session_hlist */
struct hlist_head session_hlist[L2TP_HASH_SIZE];
/* hashed list of sessions,
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index acf712ffb5e6..c5e8c9c31f76 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1811,37 +1811,31 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr,
sdata, NULL, NULL);
} else {
- int is_mesh_mcast = 1;
- const u8 *mesh_da;
+ /* DS -> MBSS (802.11-2012 13.11.3.3).
+ * For unicast with unknown forwarding information,
+ * destination might be in the MBSS or if that fails
+ * forwarded to another mesh gate. In either case
+ * resolution will be handled in ieee80211_xmit(), so
+ * leave the original DA. This also works for mcast */
+ const u8 *mesh_da = skb->data;
+
+ if (mppath)
+ mesh_da = mppath->mpp;
+ else if (mpath)
+ mesh_da = mpath->dst;
+ rcu_read_unlock();
- if (is_multicast_ether_addr(skb->data))
- /* DA TA mSA AE:SA */
- mesh_da = skb->data;
- else {
- static const u8 bcast[ETH_ALEN] =
- { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
- if (mppath) {
- /* RA TA mDA mSA AE:DA SA */
- mesh_da = mppath->mpp;
- is_mesh_mcast = 0;
- } else if (mpath) {
- mesh_da = mpath->dst;
- is_mesh_mcast = 0;
- } else {
- /* DA TA mSA AE:SA */
- mesh_da = bcast;
- }
- }
hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc,
mesh_da, sdata->vif.addr);
- rcu_read_unlock();
- if (is_mesh_mcast)
+ if (is_multicast_ether_addr(mesh_da))
+ /* DA TA mSA AE:SA */
meshhdrlen =
ieee80211_new_mesh_header(&mesh_hdr,
sdata,
skb->data + ETH_ALEN,
NULL);
else
+ /* RA TA mDA mSA AE:DA SA */
meshhdrlen =
ieee80211_new_mesh_header(&mesh_hdr,
sdata,
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 72bf32a84874..f51013c07b9f 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -1171,8 +1171,10 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
goto out_err;
}
svc->stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats);
- if (!svc->stats.cpustats)
+ if (!svc->stats.cpustats) {
+ ret = -ENOMEM;
goto out_err;
+ }
/* I'm the first user of the service */
atomic_set(&svc->usecnt, 0);
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index cf4875565d67..2ceec64b19f9 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -249,12 +249,15 @@ static void death_by_event(unsigned long ul_conntrack)
{
struct nf_conn *ct = (void *)ul_conntrack;
struct net *net = nf_ct_net(ct);
+ struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
+
+ BUG_ON(ecache == NULL);
if (nf_conntrack_event(IPCT_DESTROY, ct) < 0) {
/* bad luck, let's retry again */
- ct->timeout.expires = jiffies +
+ ecache->timeout.expires = jiffies +
(random32() % net->ct.sysctl_events_retry_timeout);
- add_timer(&ct->timeout);
+ add_timer(&ecache->timeout);
return;
}
/* we've got the event delivered, now it's dying */
@@ -268,6 +271,9 @@ static void death_by_event(unsigned long ul_conntrack)
void nf_ct_insert_dying_list(struct nf_conn *ct)
{
struct net *net = nf_ct_net(ct);
+ struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
+
+ BUG_ON(ecache == NULL);
/* add this conntrack to the dying list */
spin_lock_bh(&nf_conntrack_lock);
@@ -275,10 +281,10 @@ void nf_ct_insert_dying_list(struct nf_conn *ct)
&net->ct.dying);
spin_unlock_bh(&nf_conntrack_lock);
/* set a new timer to retry event delivery */
- setup_timer(&ct->timeout, death_by_event, (unsigned long)ct);
- ct->timeout.expires = jiffies +
+ setup_timer(&ecache->timeout, death_by_event, (unsigned long)ct);
+ ecache->timeout.expires = jiffies +
(random32() % net->ct.sysctl_events_retry_timeout);
- add_timer(&ct->timeout);
+ add_timer(&ecache->timeout);
}
EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index da4fc37a8578..9807f3278fcb 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -2790,7 +2790,8 @@ static int __init ctnetlink_init(void)
goto err_unreg_subsys;
}
- if (register_pernet_subsys(&ctnetlink_net_ops)) {
+ ret = register_pernet_subsys(&ctnetlink_net_ops);
+ if (ret < 0) {
pr_err("ctnetlink_init: cannot register pernet operations\n");
goto err_unreg_exp_subsys;
}
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 169ab59ed9d4..14e2f3903142 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -480,7 +480,7 @@ __build_packet_message(struct nfulnl_instance *inst,
}
if (indev && skb_mac_header_was_set(skb)) {
- if (nla_put_be32(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) ||
+ if (nla_put_be16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) ||
nla_put_be16(inst->skb, NFULA_HWLEN,
htons(skb->dev->hard_header_len)) ||
nla_put(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len,
@@ -996,8 +996,10 @@ static int __init nfnetlink_log_init(void)
#ifdef CONFIG_PROC_FS
if (!proc_create("nfnetlink_log", 0440,
- proc_net_netfilter, &nful_file_ops))
+ proc_net_netfilter, &nful_file_ops)) {
+ status = -ENOMEM;
goto cleanup_logger;
+ }
#endif
return status;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 1445d73533ed..527023823b5c 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1373,7 +1373,8 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
dst_pid = addr->nl_pid;
dst_group = ffs(addr->nl_groups);
err = -EPERM;
- if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND))
+ if ((dst_group || dst_pid) &&
+ !netlink_capable(sock, NL_NONROOT_SEND))
goto out;
} else {
dst_pid = nlk->dst_pid;
@@ -2147,6 +2148,7 @@ static void __init netlink_add_usersock_entry(void)
rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
nl_table[NETLINK_USERSOCK].registered = 1;
+ nl_table[NETLINK_USERSOCK].nl_nonroot = NL_NONROOT_SEND;
netlink_table_ungrab();
}
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index aee7196aac36..c5c9e2a54218 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1273,7 +1273,7 @@ static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
spin_unlock(&f->lock);
}
-bool match_fanout_group(struct packet_type *ptype, struct sock * sk)
+static bool match_fanout_group(struct packet_type *ptype, struct sock * sk)
{
if (ptype->af_packet_priv == (void*)((struct packet_sock *)sk)->fanout)
return true;
diff --git a/net/socket.c b/net/socket.c
index a5471f804d99..edc3c4af9085 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2604,7 +2604,7 @@ static int do_siocgstamp(struct net *net, struct socket *sock,
err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv);
set_fs(old_fs);
if (!err)
- err = compat_put_timeval(up, &ktv);
+ err = compat_put_timeval(&ktv, up);
return err;
}
@@ -2620,7 +2620,7 @@ static int do_siocgstampns(struct net *net, struct socket *sock,
err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts);
set_fs(old_fs);
if (!err)
- err = compat_put_timespec(up, &kts);
+ err = compat_put_timespec(&kts, up);
return err;
}
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 87cd0e4d4282..210be48d8ae3 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1994,8 +1994,10 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay)
goto error;
x->outer_mode = xfrm_get_mode(x->props.mode, family);
- if (x->outer_mode == NULL)
+ if (x->outer_mode == NULL) {
+ err = -EPROTONOSUPPORT;
goto error;
+ }
if (init_replay) {
err = xfrm_init_replay(x);
diff --git a/scripts/Makefile.fwinst b/scripts/Makefile.fwinst
index 6bf8e87f1dcf..c3f69ae275d1 100644
--- a/scripts/Makefile.fwinst
+++ b/scripts/Makefile.fwinst
@@ -42,7 +42,7 @@ quiet_cmd_install = INSTALL $(subst $(srctree)/,,$@)
$(installed-fw-dirs):
$(call cmd,mkdir)
-$(installed-fw): $(INSTALL_FW_PATH)/%: $(obj)/% | $(INSTALL_FW_PATH)/$$(dir %)
+$(installed-fw): $(INSTALL_FW_PATH)/%: $(obj)/% | $$(dir $(INSTALL_FW_PATH)/%)
$(call cmd,install)
PHONY += __fw_install __fw_modinst FORCE
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index 4629038c9e5a..4235a6361fec 100644
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -211,7 +211,7 @@ if [ -n "${CONFIG_KALLSYMS}" ]; then
if ! cmp -s System.map .tmp_System.map; then
echo >&2 Inconsistent kallsyms data
- echo >&2 echo Try "make KALLSYMS_EXTRA_PASS=1" as a workaround
+ echo >&2 Try "make KALLSYMS_EXTRA_PASS=1" as a workaround
cleanup
exit 1
fi
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index f560051a949e..f25c24c743f9 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -1209,6 +1209,9 @@ static void snd_hda_codec_free(struct hda_codec *codec)
kfree(codec);
}
+static bool snd_hda_codec_get_supported_ps(struct hda_codec *codec,
+ hda_nid_t fg, unsigned int power_state);
+
static void hda_set_power_state(struct hda_codec *codec, hda_nid_t fg,
unsigned int power_state);
@@ -1317,6 +1320,10 @@ int /*__devinit*/ snd_hda_codec_new(struct hda_bus *bus,
AC_VERB_GET_SUBSYSTEM_ID, 0);
}
+ codec->epss = snd_hda_codec_get_supported_ps(codec,
+ codec->afg ? codec->afg : codec->mfg,
+ AC_PWRST_EPSS);
+
/* power-up all before initialization */
hda_set_power_state(codec,
codec->afg ? codec->afg : codec->mfg,
@@ -3543,8 +3550,7 @@ static void hda_set_power_state(struct hda_codec *codec, hda_nid_t fg,
/* this delay seems necessary to avoid click noise at power-down */
if (power_state == AC_PWRST_D3) {
/* transition time less than 10ms for power down */
- bool epss = snd_hda_codec_get_supported_ps(codec, fg, AC_PWRST_EPSS);
- msleep(epss ? 10 : 100);
+ msleep(codec->epss ? 10 : 100);
}
/* repeat power states setting at most 10 times */
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index 7fbc1bcaf1a9..e5a7e19a8071 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -862,6 +862,7 @@ struct hda_codec {
unsigned int ignore_misc_bit:1; /* ignore MISC_NO_PRESENCE bit */
unsigned int no_jack_detect:1; /* Machine has no jack-detection */
unsigned int pcm_format_first:1; /* PCM format must be set first */
+ unsigned int epss:1; /* supporting EPSS? */
#ifdef CONFIG_SND_HDA_POWER_SAVE
unsigned int power_on :1; /* current (global) power-state */
int power_transition; /* power-state in transition */
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index ea5775a1a7db..6f806d3e56bb 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -4543,6 +4543,9 @@ static void stac92xx_line_out_detect(struct hda_codec *codec,
struct auto_pin_cfg *cfg = &spec->autocfg;
int i;
+ if (cfg->speaker_outs == 0)
+ return;
+
for (i = 0; i < cfg->line_outs; i++) {
if (presence)
break;
@@ -5531,6 +5534,7 @@ static int patch_stac92hd83xxx(struct hda_codec *codec)
snd_hda_codec_set_pincfg(codec, 0xf, 0x2181205e);
}
+ codec->epss = 0; /* longer delay needed for D3 */
codec->no_trigger_sense = 1;
codec->spec = spec;
diff --git a/sound/usb/card.c b/sound/usb/card.c
index d5b5c3388e28..4a469f0cb6d4 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -553,7 +553,7 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
struct snd_usb_audio *chip)
{
struct snd_card *card;
- struct list_head *p;
+ struct list_head *p, *n;
if (chip == (void *)-1L)
return;
@@ -570,7 +570,7 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
snd_usb_stream_disconnect(p);
}
/* release the endpoint resources */
- list_for_each(p, &chip->ep_list) {
+ list_for_each_safe(p, n, &chip->ep_list) {
snd_usb_endpoint_free(p);
}
/* release the midi resources */
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index c41181202688..d6e2bb49c59c 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -141,7 +141,7 @@ int snd_usb_endpoint_implict_feedback_sink(struct snd_usb_endpoint *ep)
*
* For implicit feedback, snd_usb_endpoint_next_packet_size() is unused.
*/
-static int next_packet_size(struct snd_usb_endpoint *ep)
+int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep)
{
unsigned long flags;
int ret;
@@ -177,15 +177,6 @@ static void retire_inbound_urb(struct snd_usb_endpoint *ep,
ep->retire_data_urb(ep->data_subs, urb);
}
-static void prepare_outbound_urb_sizes(struct snd_usb_endpoint *ep,
- struct snd_urb_ctx *ctx)
-{
- int i;
-
- for (i = 0; i < ctx->packets; ++i)
- ctx->packet_size[i] = next_packet_size(ep);
-}
-
/*
* Prepare a PLAYBACK urb for submission to the bus.
*/
@@ -370,7 +361,6 @@ static void snd_complete_urb(struct urb *urb)
goto exit_clear;
}
- prepare_outbound_urb_sizes(ep, ctx);
prepare_outbound_urb(ep, ctx);
} else {
retire_inbound_urb(ep, ctx);
@@ -799,7 +789,9 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep,
/**
* snd_usb_endpoint_start: start an snd_usb_endpoint
*
- * @ep: the endpoint to start
+ * @ep: the endpoint to start
+ * @can_sleep: flag indicating whether the operation is executed in
+ * non-atomic context
*
* A call to this function will increment the use count of the endpoint.
* In case it is not already running, the URBs for this endpoint will be
@@ -809,7 +801,7 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep,
*
* Returns an error if the URB submission failed, 0 in all other cases.
*/
-int snd_usb_endpoint_start(struct snd_usb_endpoint *ep)
+int snd_usb_endpoint_start(struct snd_usb_endpoint *ep, int can_sleep)
{
int err;
unsigned int i;
@@ -821,6 +813,11 @@ int snd_usb_endpoint_start(struct snd_usb_endpoint *ep)
if (++ep->use_count != 1)
return 0;
+ /* just to be sure */
+ deactivate_urbs(ep, 0, can_sleep);
+ if (can_sleep)
+ wait_clear_urbs(ep);
+
ep->active_mask = 0;
ep->unlink_mask = 0;
ep->phase = 0;
@@ -850,7 +847,6 @@ int snd_usb_endpoint_start(struct snd_usb_endpoint *ep)
goto __error;
if (usb_pipeout(ep->pipe)) {
- prepare_outbound_urb_sizes(ep, urb->context);
prepare_outbound_urb(ep, urb->context);
} else {
prepare_inbound_urb(ep, urb->context);
diff --git a/sound/usb/endpoint.h b/sound/usb/endpoint.h
index ee2723fb174f..cbbbdf226d66 100644
--- a/sound/usb/endpoint.h
+++ b/sound/usb/endpoint.h
@@ -13,7 +13,7 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep,
struct audioformat *fmt,
struct snd_usb_endpoint *sync_ep);
-int snd_usb_endpoint_start(struct snd_usb_endpoint *ep);
+int snd_usb_endpoint_start(struct snd_usb_endpoint *ep, int can_sleep);
void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep,
int force, int can_sleep, int wait);
int snd_usb_endpoint_activate(struct snd_usb_endpoint *ep);
@@ -21,6 +21,7 @@ int snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep);
void snd_usb_endpoint_free(struct list_head *head);
int snd_usb_endpoint_implict_feedback_sink(struct snd_usb_endpoint *ep);
+int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep);
void snd_usb_handle_sync_urb(struct snd_usb_endpoint *ep,
struct snd_usb_endpoint *sender,
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 62ec808ed792..fd5e982fc98c 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -212,7 +212,7 @@ int snd_usb_init_pitch(struct snd_usb_audio *chip, int iface,
}
}
-static int start_endpoints(struct snd_usb_substream *subs)
+static int start_endpoints(struct snd_usb_substream *subs, int can_sleep)
{
int err;
@@ -225,7 +225,7 @@ static int start_endpoints(struct snd_usb_substream *subs)
snd_printdd(KERN_DEBUG "Starting data EP @%p\n", ep);
ep->data_subs = subs;
- err = snd_usb_endpoint_start(ep);
+ err = snd_usb_endpoint_start(ep, can_sleep);
if (err < 0) {
clear_bit(SUBSTREAM_FLAG_DATA_EP_STARTED, &subs->flags);
return err;
@@ -236,10 +236,25 @@ static int start_endpoints(struct snd_usb_substream *subs)
!test_and_set_bit(SUBSTREAM_FLAG_SYNC_EP_STARTED, &subs->flags)) {
struct snd_usb_endpoint *ep = subs->sync_endpoint;
+ if (subs->data_endpoint->iface != subs->sync_endpoint->iface ||
+ subs->data_endpoint->alt_idx != subs->sync_endpoint->alt_idx) {
+ err = usb_set_interface(subs->dev,
+ subs->sync_endpoint->iface,
+ subs->sync_endpoint->alt_idx);
+ if (err < 0) {
+ snd_printk(KERN_ERR
+ "%d:%d:%d: cannot set interface (%d)\n",
+ subs->dev->devnum,
+ subs->sync_endpoint->iface,
+ subs->sync_endpoint->alt_idx, err);
+ return -EIO;
+ }
+ }
+
snd_printdd(KERN_DEBUG "Starting sync EP @%p\n", ep);
ep->sync_slave = subs->data_endpoint;
- err = snd_usb_endpoint_start(ep);
+ err = snd_usb_endpoint_start(ep, can_sleep);
if (err < 0) {
clear_bit(SUBSTREAM_FLAG_SYNC_EP_STARTED, &subs->flags);
return err;
@@ -544,13 +559,10 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream)
subs->last_frame_number = 0;
runtime->delay = 0;
- /* clear the pending deactivation on the target EPs */
- deactivate_endpoints(subs);
-
/* for playback, submit the URBs now; otherwise, the first hwptr_done
* updates for all URBs would happen at the same time when starting */
if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK)
- return start_endpoints(subs);
+ return start_endpoints(subs, 1);
return 0;
}
@@ -1032,6 +1044,7 @@ static void prepare_playback_urb(struct snd_usb_substream *subs,
struct urb *urb)
{
struct snd_pcm_runtime *runtime = subs->pcm_substream->runtime;
+ struct snd_usb_endpoint *ep = subs->data_endpoint;
struct snd_urb_ctx *ctx = urb->context;
unsigned int counts, frames, bytes;
int i, stride, period_elapsed = 0;
@@ -1043,7 +1056,11 @@ static void prepare_playback_urb(struct snd_usb_substream *subs,
urb->number_of_packets = 0;
spin_lock_irqsave(&subs->lock, flags);
for (i = 0; i < ctx->packets; i++) {
- counts = ctx->packet_size[i];
+ if (ctx->packet_size[i])
+ counts = ctx->packet_size[i];
+ else
+ counts = snd_usb_endpoint_next_packet_size(ep);
+
/* set up descriptor */
urb->iso_frame_desc[i].offset = frames * stride;
urb->iso_frame_desc[i].length = counts * stride;
@@ -1094,7 +1111,16 @@ static void prepare_playback_urb(struct snd_usb_substream *subs,
subs->hwptr_done += bytes;
if (subs->hwptr_done >= runtime->buffer_size * stride)
subs->hwptr_done -= runtime->buffer_size * stride;
+
+ /* update delay with exact number of samples queued */
+ runtime->delay = subs->last_delay;
runtime->delay += frames;
+ subs->last_delay = runtime->delay;
+
+ /* realign last_frame_number */
+ subs->last_frame_number = usb_get_current_frame_number(subs->dev);
+ subs->last_frame_number &= 0xFF; /* keep 8 LSBs */
+
spin_unlock_irqrestore(&subs->lock, flags);
urb->transfer_buffer_length = bytes;
if (period_elapsed)
@@ -1112,12 +1138,26 @@ static void retire_playback_urb(struct snd_usb_substream *subs,
struct snd_pcm_runtime *runtime = subs->pcm_substream->runtime;
int stride = runtime->frame_bits >> 3;
int processed = urb->transfer_buffer_length / stride;
+ int est_delay;
spin_lock_irqsave(&subs->lock, flags);
- if (processed > runtime->delay)
- runtime->delay = 0;
+ est_delay = snd_usb_pcm_delay(subs, runtime->rate);
+ /* update delay with exact number of samples played */
+ if (processed > subs->last_delay)
+ subs->last_delay = 0;
else
- runtime->delay -= processed;
+ subs->last_delay -= processed;
+ runtime->delay = subs->last_delay;
+
+ /*
+ * Report when delay estimate is off by more than 2ms.
+ * The error should be lower than 2ms since the estimate relies
+ * on two reads of a counter updated every ms.
+ */
+ if (abs(est_delay - subs->last_delay) * 1000 > runtime->rate * 2)
+ snd_printk(KERN_DEBUG "delay: estimated %d, actual %d\n",
+ est_delay, subs->last_delay);
+
spin_unlock_irqrestore(&subs->lock, flags);
}
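To make the 2 ms threshold concrete, the check scales the frame error by 1000 before comparing it with rate * 2; for example:

/* At 48 kHz: |est_delay - last_delay| * 1000 > 48000 * 2 fires once the
 * estimate is more than 96 frames, i.e. 2 ms of audio, off the mark. */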
@@ -1175,7 +1215,7 @@ static int snd_usb_substream_capture_trigger(struct snd_pcm_substream *substream
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
- err = start_endpoints(subs);
+ err = start_endpoints(subs, 0);
if (err < 0)
return err;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 246852397e30..d617f69131d7 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1976,9 +1976,10 @@ static long kvm_vcpu_compat_ioctl(struct file *filp,
if (copy_from_user(&csigset, sigmask_arg->sigset,
sizeof csigset))
goto out;
- }
- sigset_from_compat(&sigset, &csigset);
- r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
+ sigset_from_compat(&sigset, &csigset);
+ r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
+ } else
+ r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL);
break;
}
default: