48 files changed, 630 insertions(+), 530 deletions(-)
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 52c80c2a57f2..600eef3f3ac7 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -351,7 +351,7 @@ config SGI_IP27 select ARC64 select BOOT_ELF64 select DEFAULT_SGI_PARTITION - select DMA_IP27 + select DMA_COHERENT select SYS_HAS_EARLY_PRINTK select HW_HAS_PCI select NR_CPUS_DEFAULT_64 @@ -761,9 +761,6 @@ config CFE config DMA_COHERENT bool -config DMA_IP27 - bool - config DMA_NONCOHERENT bool select DMA_NEED_PCI_MAP_STATE @@ -1368,7 +1365,7 @@ config CPU_SUPPORTS_64BIT_KERNEL # config HARDWARE_WATCHPOINTS bool - default y if CPU_MIPS32 || CPU_MIPS64 + default y if CPU_MIPSR1 || CPU_MIPSR2 menu "Kernel type" diff --git a/arch/mips/alchemy/common/time.c b/arch/mips/alchemy/common/time.c index 32880146cbc1..6fd441d16af5 100644 --- a/arch/mips/alchemy/common/time.c +++ b/arch/mips/alchemy/common/time.c @@ -89,7 +89,7 @@ static struct clock_event_device au1x_rtcmatch2_clockdev = { .irq = AU1000_RTC_MATCH2_INT, .set_next_event = au1x_rtcmatch2_set_next_event, .set_mode = au1x_rtcmatch2_set_mode, - .cpumask = CPU_MASK_ALL, + .cpumask = CPU_MASK_ALL_PTR, }; static struct irqaction au1x_rtcmatch2_irqaction = { diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c index e085feddb4a4..5f4e49ba4713 100644 --- a/arch/mips/cavium-octeon/setup.c +++ b/arch/mips/cavium-octeon/setup.c @@ -15,13 +15,11 @@ #include <linux/serial.h> #include <linux/types.h> #include <linux/string.h> /* for memset */ -#include <linux/serial.h> #include <linux/tty.h> #include <linux/time.h> #include <linux/platform_device.h> #include <linux/serial_core.h> #include <linux/serial_8250.h> -#include <linux/string.h> #include <asm/processor.h> #include <asm/reboot.h> diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig index 34ea319be94c..f2baea3039bb 100644 --- a/arch/mips/configs/ip27_defconfig +++ b/arch/mips/configs/ip27_defconfig @@ -53,7 +53,7 @@ CONFIG_GENERIC_TIME=y CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y CONFIG_ARC=y -CONFIG_DMA_IP27=y +CONFIG_DMA_COHERENT=y CONFIG_EARLY_PRINTK=y CONFIG_SYS_HAS_EARLY_PRINTK=y # CONFIG_NO_IOPORT is not set diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h index c996c3b4d074..1b332e15ab52 100644 --- a/arch/mips/include/asm/atomic.h +++ b/arch/mips/include/asm/atomic.h @@ -50,7 +50,7 @@ static __inline__ void atomic_add(int i, atomic_t * v) { if (cpu_has_llsc && R10000_LLSC_WAR) { - unsigned long temp; + int temp; __asm__ __volatile__( " .set mips3 \n" @@ -62,7 +62,7 @@ static __inline__ void atomic_add(int i, atomic_t * v) : "=&r" (temp), "=m" (v->counter) : "Ir" (i), "m" (v->counter)); } else if (cpu_has_llsc) { - unsigned long temp; + int temp; __asm__ __volatile__( " .set mips3 \n" @@ -95,7 +95,7 @@ static __inline__ void atomic_add(int i, atomic_t * v) static __inline__ void atomic_sub(int i, atomic_t * v) { if (cpu_has_llsc && R10000_LLSC_WAR) { - unsigned long temp; + int temp; __asm__ __volatile__( " .set mips3 \n" @@ -107,7 +107,7 @@ static __inline__ void atomic_sub(int i, atomic_t * v) : "=&r" (temp), "=m" (v->counter) : "Ir" (i), "m" (v->counter)); } else if (cpu_has_llsc) { - unsigned long temp; + int temp; __asm__ __volatile__( " .set mips3 \n" @@ -135,12 +135,12 @@ static __inline__ void atomic_sub(int i, atomic_t * v) */ static __inline__ int atomic_add_return(int i, atomic_t * v) { - unsigned long result; + int result; smp_llsc_mb(); if (cpu_has_llsc && R10000_LLSC_WAR) { - unsigned long temp; + int 
temp; __asm__ __volatile__( " .set mips3 \n" @@ -154,7 +154,7 @@ static __inline__ int atomic_add_return(int i, atomic_t * v) : "Ir" (i), "m" (v->counter) : "memory"); } else if (cpu_has_llsc) { - unsigned long temp; + int temp; __asm__ __volatile__( " .set mips3 \n" @@ -187,12 +187,12 @@ static __inline__ int atomic_add_return(int i, atomic_t * v) static __inline__ int atomic_sub_return(int i, atomic_t * v) { - unsigned long result; + int result; smp_llsc_mb(); if (cpu_has_llsc && R10000_LLSC_WAR) { - unsigned long temp; + int temp; __asm__ __volatile__( " .set mips3 \n" @@ -206,7 +206,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v) : "Ir" (i), "m" (v->counter) : "memory"); } else if (cpu_has_llsc) { - unsigned long temp; + int temp; __asm__ __volatile__( " .set mips3 \n" @@ -247,12 +247,12 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v) */ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v) { - unsigned long result; + int result; smp_llsc_mb(); if (cpu_has_llsc && R10000_LLSC_WAR) { - unsigned long temp; + int temp; __asm__ __volatile__( " .set mips3 \n" @@ -270,7 +270,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v) : "Ir" (i), "m" (v->counter) : "memory"); } else if (cpu_has_llsc) { - unsigned long temp; + int temp; __asm__ __volatile__( " .set mips3 \n" @@ -429,7 +429,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u) static __inline__ void atomic64_add(long i, atomic64_t * v) { if (cpu_has_llsc && R10000_LLSC_WAR) { - unsigned long temp; + long temp; __asm__ __volatile__( " .set mips3 \n" @@ -441,7 +441,7 @@ static __inline__ void atomic64_add(long i, atomic64_t * v) : "=&r" (temp), "=m" (v->counter) : "Ir" (i), "m" (v->counter)); } else if (cpu_has_llsc) { - unsigned long temp; + long temp; __asm__ __volatile__( " .set mips3 \n" @@ -474,7 +474,7 @@ static __inline__ void atomic64_add(long i, atomic64_t * v) static __inline__ void atomic64_sub(long i, atomic64_t * v) { if (cpu_has_llsc && R10000_LLSC_WAR) { - unsigned long temp; + long temp; __asm__ __volatile__( " .set mips3 \n" @@ -486,7 +486,7 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v) : "=&r" (temp), "=m" (v->counter) : "Ir" (i), "m" (v->counter)); } else if (cpu_has_llsc) { - unsigned long temp; + long temp; __asm__ __volatile__( " .set mips3 \n" @@ -514,12 +514,12 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v) */ static __inline__ long atomic64_add_return(long i, atomic64_t * v) { - unsigned long result; + long result; smp_llsc_mb(); if (cpu_has_llsc && R10000_LLSC_WAR) { - unsigned long temp; + long temp; __asm__ __volatile__( " .set mips3 \n" @@ -533,7 +533,7 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v) : "Ir" (i), "m" (v->counter) : "memory"); } else if (cpu_has_llsc) { - unsigned long temp; + long temp; __asm__ __volatile__( " .set mips3 \n" @@ -566,12 +566,12 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v) static __inline__ long atomic64_sub_return(long i, atomic64_t * v) { - unsigned long result; + long result; smp_llsc_mb(); if (cpu_has_llsc && R10000_LLSC_WAR) { - unsigned long temp; + long temp; __asm__ __volatile__( " .set mips3 \n" @@ -585,7 +585,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v) : "Ir" (i), "m" (v->counter) : "memory"); } else if (cpu_has_llsc) { - unsigned long temp; + long temp; __asm__ __volatile__( " .set mips3 \n" @@ -626,12 +626,12 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v) 
*/ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v) { - unsigned long result; + long result; smp_llsc_mb(); if (cpu_has_llsc && R10000_LLSC_WAR) { - unsigned long temp; + long temp; __asm__ __volatile__( " .set mips3 \n" @@ -649,7 +649,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v) : "Ir" (i), "m" (v->counter) : "memory"); } else if (cpu_has_llsc) { - unsigned long temp; + long temp; __asm__ __volatile__( " .set mips3 \n" diff --git a/arch/mips/include/asm/mach-rc32434/gpio.h b/arch/mips/include/asm/mach-rc32434/gpio.h index b5cf6457305a..3cb50d17b62d 100644 --- a/arch/mips/include/asm/mach-rc32434/gpio.h +++ b/arch/mips/include/asm/mach-rc32434/gpio.h @@ -80,11 +80,8 @@ struct rb532_gpio_reg { /* Compact Flash GPIO pin */ #define CF_GPIO_NUM 13 -extern void set_434_reg(unsigned reg_offs, unsigned bit, unsigned len, unsigned val); -extern unsigned get_434_reg(unsigned reg_offs); -extern void set_latch_u5(unsigned char or_mask, unsigned char nand_mask); -extern unsigned char get_latch_u5(void); extern void rb532_gpio_set_ilevel(int bit, unsigned gpio); extern void rb532_gpio_set_istat(int bit, unsigned gpio); +extern void rb532_gpio_set_func(unsigned gpio); #endif /* _RC32434_GPIO_H_ */ diff --git a/arch/mips/include/asm/mach-rc32434/irq.h b/arch/mips/include/asm/mach-rc32434/irq.h index 56738d8ec4e2..023a5b100ed0 100644 --- a/arch/mips/include/asm/mach-rc32434/irq.h +++ b/arch/mips/include/asm/mach-rc32434/irq.h @@ -30,4 +30,7 @@ #define ETH0_RX_OVR_IRQ (GROUP3_IRQ_BASE + 9) #define ETH0_TX_UND_IRQ (GROUP3_IRQ_BASE + 10) +#define GPIO_MAPPED_IRQ_BASE GROUP4_IRQ_BASE +#define GPIO_MAPPED_IRQ_GROUP 4 + #endif /* __ASM_RC32434_IRQ_H */ diff --git a/arch/mips/include/asm/mach-rc32434/rb.h b/arch/mips/include/asm/mach-rc32434/rb.h index f25a84916703..6dc5f8df1f3e 100644 --- a/arch/mips/include/asm/mach-rc32434/rb.h +++ b/arch/mips/include/asm/mach-rc32434/rb.h @@ -83,4 +83,7 @@ struct mpmc_device { void __iomem *base; }; +extern void set_latch_u5(unsigned char or_mask, unsigned char nand_mask); +extern unsigned char get_latch_u5(void); + #endif /* __ASM_RC32434_RB_H */ diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h index 1f30d16d4669..ce47118e52b7 100644 --- a/arch/mips/include/asm/ptrace.h +++ b/arch/mips/include/asm/ptrace.h @@ -105,7 +105,7 @@ struct pt_watch_regs { enum pt_watch_style style; union { struct mips32_watch_regs mips32; - struct mips32_watch_regs mips64; + struct mips64_watch_regs mips64; }; }; diff --git a/arch/mips/include/asm/termios.h b/arch/mips/include/asm/termios.h index a275661fa7e1..8f77f774a2a0 100644 --- a/arch/mips/include/asm/termios.h +++ b/arch/mips/include/asm/termios.h @@ -9,6 +9,7 @@ #ifndef _ASM_TERMIOS_H #define _ASM_TERMIOS_H +#include <linux/errno.h> #include <asm/termbits.h> #include <asm/ioctls.h> @@ -94,38 +95,81 @@ struct termio { /* * Translate a "termio" structure into a "termios". Ugh. 
*/ -#define user_termio_to_kernel_termios(termios, termio) \ -({ \ - unsigned short tmp; \ - get_user(tmp, &(termio)->c_iflag); \ - (termios)->c_iflag = (0xffff0000 & ((termios)->c_iflag)) | tmp; \ - get_user(tmp, &(termio)->c_oflag); \ - (termios)->c_oflag = (0xffff0000 & ((termios)->c_oflag)) | tmp; \ - get_user(tmp, &(termio)->c_cflag); \ - (termios)->c_cflag = (0xffff0000 & ((termios)->c_cflag)) | tmp; \ - get_user(tmp, &(termio)->c_lflag); \ - (termios)->c_lflag = (0xffff0000 & ((termios)->c_lflag)) | tmp; \ - get_user((termios)->c_line, &(termio)->c_line); \ - copy_from_user((termios)->c_cc, (termio)->c_cc, NCC); \ -}) +static inline int user_termio_to_kernel_termios(struct ktermios *termios, + struct termio __user *termio) +{ + unsigned short iflag, oflag, cflag, lflag; + unsigned int err; + + if (!access_ok(VERIFY_READ, termio, sizeof(struct termio))) + return -EFAULT; + + err = __get_user(iflag, &termio->c_iflag); + termios->c_iflag = (termios->c_iflag & 0xffff0000) | iflag; + err |=__get_user(oflag, &termio->c_oflag); + termios->c_oflag = (termios->c_oflag & 0xffff0000) | oflag; + err |=__get_user(cflag, &termio->c_cflag); + termios->c_cflag = (termios->c_cflag & 0xffff0000) | cflag; + err |=__get_user(lflag, &termio->c_lflag); + termios->c_lflag = (termios->c_lflag & 0xffff0000) | lflag; + err |=__get_user(termios->c_line, &termio->c_line); + if (err) + return -EFAULT; + + if (__copy_from_user(termios->c_cc, termio->c_cc, NCC)) + return -EFAULT; + + return 0; +} /* * Translate a "termios" structure into a "termio". Ugh. */ -#define kernel_termios_to_user_termio(termio, termios) \ -({ \ - put_user((termios)->c_iflag, &(termio)->c_iflag); \ - put_user((termios)->c_oflag, &(termio)->c_oflag); \ - put_user((termios)->c_cflag, &(termio)->c_cflag); \ - put_user((termios)->c_lflag, &(termio)->c_lflag); \ - put_user((termios)->c_line, &(termio)->c_line); \ - copy_to_user((termio)->c_cc, (termios)->c_cc, NCC); \ -}) - -#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios2)) -#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios2)) -#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios)) -#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios)) +static inline int kernel_termios_to_user_termio(struct termio __user *termio, + struct ktermios *termios) +{ + int err; + + if (!access_ok(VERIFY_WRITE, termio, sizeof(struct termio))) + return -EFAULT; + + err = __put_user(termios->c_iflag, &termio->c_iflag); + err |= __put_user(termios->c_oflag, &termio->c_oflag); + err |= __put_user(termios->c_cflag, &termio->c_cflag); + err |= __put_user(termios->c_lflag, &termio->c_lflag); + err |= __put_user(termios->c_line, &termio->c_line); + if (err) + return -EFAULT; + + if (__copy_to_user(termio->c_cc, termios->c_cc, NCC)) + return -EFAULT; + + return 0; +} + +static inline int user_termios_to_kernel_termios(struct ktermios __user *k, + struct termios2 *u) +{ + return copy_from_user(k, u, sizeof(struct termios2)) ? -EFAULT : 0; +} + +static inline int kernel_termios_to_user_termios(struct termios2 __user *u, + struct ktermios *k) +{ + return copy_to_user(u, k, sizeof(struct termios2)) ? -EFAULT : 0; +} + +static inline int user_termios_to_kernel_termios_1(struct ktermios *k, + struct termios __user *u) +{ + return copy_from_user(k, u, sizeof(struct termios)) ? 
-EFAULT : 0; +} + +static inline int kernel_termios_to_user_termios_1(struct termios __user *u, + struct ktermios *k) +{ + return copy_to_user(u, k, sizeof(struct termios)) ? -EFAULT : 0; +} #endif /* defined(__KERNEL__) */ diff --git a/arch/mips/include/asm/txx9/tx4939.h b/arch/mips/include/asm/txx9/tx4939.h index 88badb423010..964ef7ede268 100644 --- a/arch/mips/include/asm/txx9/tx4939.h +++ b/arch/mips/include/asm/txx9/tx4939.h @@ -541,5 +541,6 @@ void tx4939_irq_init(void); int tx4939_irq(void); void tx4939_mtd_init(int ch); void tx4939_ata_init(void); +void tx4939_rtc_init(void); #endif /* __ASM_TXX9_TX4939_H */ diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S index fb6f73148df2..8882e5766f27 100644 --- a/arch/mips/kernel/genex.S +++ b/arch/mips/kernel/genex.S @@ -458,7 +458,11 @@ NESTED(nmi_handler, PT_SIZE, sp) BUILD_HANDLER fpe fpe fpe silent /* #15 */ BUILD_HANDLER mdmx mdmx sti silent /* #22 */ #ifdef CONFIG_HARDWARE_WATCHPOINTS - BUILD_HANDLER watch watch sti silent /* #23 */ + /* + * For watch, interrupts will be enabled after the watch + * registers are read. + */ + BUILD_HANDLER watch watch cli silent /* #23 */ #else BUILD_HANDLER watch watch sti verbose /* #23 */ #endif diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c index 5e77a3a21f98..42461310b185 100644 --- a/arch/mips/kernel/mips-mt-fpaff.c +++ b/arch/mips/kernel/mips-mt-fpaff.c @@ -79,7 +79,8 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len, euid = current_euid(); retval = -EPERM; - if (euid != p->euid && euid != p->uid && !capable(CAP_SYS_NICE)) { + if (euid != p->cred->euid && euid != p->cred->uid && + !capable(CAP_SYS_NICE)) { read_unlock(&tasklist_lock); goto out_unlock; } diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index f6083c6bfaa4..b2d7041341b8 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -944,6 +944,9 @@ asmlinkage void do_mdmx(struct pt_regs *regs) force_sig(SIGILL, current); } +/* + * Called with interrupts disabled. + */ asmlinkage void do_watch(struct pt_regs *regs) { u32 cause; @@ -963,9 +966,12 @@ asmlinkage void do_watch(struct pt_regs *regs) */ if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) { mips_read_watch_registers(); + local_irq_enable(); force_sig(SIGTRAP, current); - } else + } else { mips_clear_watch_registers(); + local_irq_enable(); + } } asmlinkage void do_mcheck(struct pt_regs *regs) @@ -1582,7 +1588,11 @@ void __init set_handler(unsigned long offset, void *addr, unsigned long size) static char panic_null_cerr[] __cpuinitdata = "Trying to set NULL cache error exception handler"; -/* Install uncached CPU exception handler */ +/* + * Install uncached CPU exception handler. + * This is suitable only for the cache error exception which is the only + * exception handler that is being run uncached. + */ void __cpuinit set_uncached_handler(unsigned long offset, void *addr, unsigned long size) { @@ -1593,7 +1603,7 @@ void __cpuinit set_uncached_handler(unsigned long offset, void *addr, unsigned long uncached_ebase = TO_UNCAC(ebase); #endif if (cpu_has_mips_r2) - ebase += (read_c0_ebase() & 0x3ffff000); + uncached_ebase += (read_c0_ebase() & 0x3ffff000); if (!addr) panic(panic_null_cerr); diff --git a/arch/mips/lib/memcpy-inatomic.S b/arch/mips/lib/memcpy-inatomic.S index 736d0fb56a94..68853a038d3f 100644 --- a/arch/mips/lib/memcpy-inatomic.S +++ b/arch/mips/lib/memcpy-inatomic.S @@ -21,7 +21,7 @@ * end of memory on some systems. 
It's also a seriously bad idea on non * dma-coherent systems. */ -#if !defined(CONFIG_DMA_COHERENT) || !defined(CONFIG_DMA_IP27) +#ifdef CONFIG_DMA_NONCOHERENT #undef CONFIG_CPU_HAS_PREFETCH #endif #ifdef CONFIG_MIPS_MALTA diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S index c06cccf60bec..56a1f85a1ce8 100644 --- a/arch/mips/lib/memcpy.S +++ b/arch/mips/lib/memcpy.S @@ -21,7 +21,7 @@ * end of memory on some systems. It's also a seriously bad idea on non * dma-coherent systems. */ -#if !defined(CONFIG_DMA_COHERENT) || !defined(CONFIG_DMA_IP27) +#ifdef CONFIG_DMA_NONCOHERENT #undef CONFIG_CPU_HAS_PREFETCH #endif #ifdef CONFIG_MIPS_MALTA diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index 6e99665ae860..c43f4b26a690 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -618,15 +618,35 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size) if (cpu_has_inclusive_pcaches) { if (size >= scache_size) r4k_blast_scache(); - else + else { + unsigned long lsize = cpu_scache_line_size(); + unsigned long almask = ~(lsize - 1); + + /* + * There is no clearly documented alignment requirement + * for the cache instruction on MIPS processors and + * some processors, among them the RM5200 and RM7000 + * QED processors will throw an address error for cache + * hit ops with insufficient alignment. Solved by + * aligning the address to cache line size. + */ + cache_op(Hit_Writeback_Inv_SD, addr & almask); + cache_op(Hit_Writeback_Inv_SD, + (addr + size - 1) & almask); blast_inv_scache_range(addr, addr + size); + } return; } if (cpu_has_safe_index_cacheops && size >= dcache_size) { r4k_blast_dcache(); } else { + unsigned long lsize = cpu_dcache_line_size(); + unsigned long almask = ~(lsize - 1); + R4600_HIT_CACHEOP_WAR_IMPL; + cache_op(Hit_Writeback_Inv_D, addr & almask); + cache_op(Hit_Writeback_Inv_D, (addr + size - 1) & almask); blast_inv_dcache_range(addr, addr + size); } diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c index fa636fc6b7b9..55767ad9f00e 100644 --- a/arch/mips/mm/fault.c +++ b/arch/mips/mm/fault.c @@ -97,7 +97,6 @@ good_area: goto bad_area; } -survive: /* * If for any reason at all we couldn't handle the fault, * make sure we exit gracefully rather than endlessly redo @@ -167,21 +166,13 @@ no_context: field, regs->regs[31]); die("Oops", regs); -/* - * We ran out of memory, or some other thing happened to us that made - * us unable to handle the page fault gracefully. - */ out_of_memory: - up_read(&mm->mmap_sem); - if (is_global_init(tsk)) { - yield(); - down_read(&mm->mmap_sem); - goto survive; - } - printk("VM: killing process %s\n", tsk->comm); - if (user_mode(regs)) - do_group_exit(SIGKILL); - goto no_context; + /* + * We ran out of memory, call the OOM killer, and return the userspace + * (which will retry the fault, or kill us if we got oom-killed). 
+ */ + pagefault_out_of_memory(); + return; do_sigbus: up_read(&mm->mmap_sem); diff --git a/arch/mips/pci/pci-rc32434.c b/arch/mips/pci/pci-rc32434.c index 1c2821e2f494..71f7d27b0d4c 100644 --- a/arch/mips/pci/pci-rc32434.c +++ b/arch/mips/pci/pci-rc32434.c @@ -205,6 +205,8 @@ static int __init rc32434_pcibridge_init(void) static int __init rc32434_pci_init(void) { + void __iomem *io_map_base; + pr_info("PCI: Initializing PCI\n"); ioport_resource.start = rc32434_res_pci_io1.start; @@ -212,6 +214,15 @@ static int __init rc32434_pci_init(void) rc32434_pcibridge_init(); + io_map_base = ioremap(rc32434_res_pci_io1.start, + rc32434_res_pci_io1.end - rc32434_res_pci_io1.start + 1); + + if (!io_map_base) + return -ENOMEM; + + rc32434_controller.io_map_base = + (unsigned long)io_map_base - rc32434_res_pci_io1.start; + register_pci_controller(&rc32434_controller); rc32434_sync(); diff --git a/arch/mips/rb532/devices.c b/arch/mips/rb532/devices.c index c1c29181bd46..4a5f05b662ae 100644 --- a/arch/mips/rb532/devices.c +++ b/arch/mips/rb532/devices.c @@ -24,6 +24,7 @@ #include <linux/mtd/partitions.h> #include <linux/gpio_keys.h> #include <linux/input.h> +#include <linux/serial_8250.h> #include <asm/bootinfo.h> @@ -39,6 +40,29 @@ #define ETH0_RX_DMA_ADDR (DMA0_BASE_ADDR + 0 * DMA_CHAN_OFFSET) #define ETH0_TX_DMA_ADDR (DMA0_BASE_ADDR + 1 * DMA_CHAN_OFFSET) +extern unsigned int idt_cpu_freq; + +static struct mpmc_device dev3; + +void set_latch_u5(unsigned char or_mask, unsigned char nand_mask) +{ + unsigned long flags; + + spin_lock_irqsave(&dev3.lock, flags); + + dev3.state = (dev3.state | or_mask) & ~nand_mask; + writeb(dev3.state, dev3.base); + + spin_unlock_irqrestore(&dev3.lock, flags); +} +EXPORT_SYMBOL(set_latch_u5); + +unsigned char get_latch_u5(void) +{ + return dev3.state; +} +EXPORT_SYMBOL(get_latch_u5); + static struct resource korina_dev0_res[] = { { .name = "korina_regs", @@ -86,7 +110,7 @@ static struct korina_device korina_dev0_data = { static struct platform_device korina_dev0 = { .id = -1, .name = "korina", - .dev.platform_data = &korina_dev0_data, + .dev.driver_data = &korina_dev0_data, .resource = korina_dev0_res, .num_resources = ARRAY_SIZE(korina_dev0_res), }; @@ -214,12 +238,32 @@ static struct platform_device rb532_wdt = { .num_resources = ARRAY_SIZE(rb532_wdt_res), }; +static struct plat_serial8250_port rb532_uart_res[] = { + { + .membase = (char *)KSEG1ADDR(REGBASE + UART0BASE), + .irq = UART0_IRQ, + .regshift = 2, + .iotype = UPIO_MEM, + .flags = UPF_BOOT_AUTOCONF, + }, + { + .flags = 0, + } +}; + +static struct platform_device rb532_uart = { + .name = "serial8250", + .id = PLAT8250_DEV_PLATFORM, + .dev.platform_data = &rb532_uart_res, +}; + static struct platform_device *rb532_devs[] = { &korina_dev0, &nand_slot0, &cf_slot0, &rb532_led, &rb532_button, + &rb532_uart, &rb532_wdt }; @@ -291,9 +335,20 @@ static int __init plat_setup_devices(void) nand_slot0_res[0].start = readl(IDT434_REG_BASE + DEV2BASE); nand_slot0_res[0].end = nand_slot0_res[0].start + 0x1000; + /* Read and map device controller 3 */ + dev3.base = ioremap_nocache(readl(IDT434_REG_BASE + DEV3BASE), 1); + + if (!dev3.base) { + printk(KERN_ERR "rb532: cannot remap device controller 3\n"); + return -ENXIO; + } + /* Initialise the NAND device */ rb532_nand_setup(); + /* set the uart clock to the current cpu frequency */ + rb532_uart_res[0].uartclk = idt_cpu_freq; + return platform_add_devices(rb532_devs, ARRAY_SIZE(rb532_devs)); } diff --git a/arch/mips/rb532/gpio.c b/arch/mips/rb532/gpio.c index 
0e84c8ab6a39..37de05d595e7 100644 --- a/arch/mips/rb532/gpio.c +++ b/arch/mips/rb532/gpio.c @@ -41,8 +41,6 @@ struct rb532_gpio_chip { void __iomem *regbase; }; -struct mpmc_device dev3; - static struct resource rb532_gpio_reg0_res[] = { { .name = "gpio_reg0", @@ -52,61 +50,6 @@ static struct resource rb532_gpio_reg0_res[] = { } }; -static struct resource rb532_dev3_ctl_res[] = { - { - .name = "dev3_ctl", - .start = REGBASE + DEV3BASE, - .end = REGBASE + DEV3BASE + sizeof(struct dev_reg) - 1, - .flags = IORESOURCE_MEM, - } -}; - -void set_434_reg(unsigned reg_offs, unsigned bit, unsigned len, unsigned val) -{ - unsigned long flags; - unsigned data; - unsigned i = 0; - - spin_lock_irqsave(&dev3.lock, flags); - - data = readl(IDT434_REG_BASE + reg_offs); - for (i = 0; i != len; ++i) { - if (val & (1 << i)) - data |= (1 << (i + bit)); - else - data &= ~(1 << (i + bit)); - } - writel(data, (IDT434_REG_BASE + reg_offs)); - - spin_unlock_irqrestore(&dev3.lock, flags); -} -EXPORT_SYMBOL(set_434_reg); - -unsigned get_434_reg(unsigned reg_offs) -{ - return readl(IDT434_REG_BASE + reg_offs); -} -EXPORT_SYMBOL(get_434_reg); - -void set_latch_u5(unsigned char or_mask, unsigned char nand_mask) -{ - unsigned long flags; - - spin_lock_irqsave(&dev3.lock, flags); - - dev3.state = (dev3.state | or_mask) & ~nand_mask; - writel(dev3.state, &dev3.base); - - spin_unlock_irqrestore(&dev3.lock, flags); -} -EXPORT_SYMBOL(set_latch_u5); - -unsigned char get_latch_u5(void) -{ - return dev3.state; -} -EXPORT_SYMBOL(get_latch_u5); - /* rb532_set_bit - sanely set a bit * * bitval: new value for the bit @@ -119,13 +62,11 @@ static inline void rb532_set_bit(unsigned bitval, unsigned long flags; u32 val; - bitval = !!bitval; /* map parameter to {0,1} */ - local_irq_save(flags); val = readl(ioaddr); - val &= ~( ~bitval << offset ); /* unset bit if bitval == 0 */ - val |= ( bitval << offset ); /* set bit if bitval == 1 */ + val &= ~(!bitval << offset); /* unset bit if bitval == 0 */ + val |= (!!bitval << offset); /* set bit if bitval == 1 */ writel(val, ioaddr); local_irq_restore(flags); @@ -171,8 +112,8 @@ static int rb532_gpio_direction_input(struct gpio_chip *chip, unsigned offset) gpch = container_of(chip, struct rb532_gpio_chip, chip); - if (rb532_get_bit(offset, gpch->regbase + GPIOFUNC)) - return 1; /* alternate function, GPIOCFG is ignored */ + /* disable alternate function in case it's set */ + rb532_set_bit(0, offset, gpch->regbase + GPIOFUNC); rb532_set_bit(0, offset, gpch->regbase + GPIOCFG); return 0; @@ -188,8 +129,8 @@ static int rb532_gpio_direction_output(struct gpio_chip *chip, gpch = container_of(chip, struct rb532_gpio_chip, chip); - if (rb532_get_bit(offset, gpch->regbase + GPIOFUNC)) - return 1; /* alternate function, GPIOCFG is ignored */ + /* disable alternate function in case it's set */ + rb532_set_bit(0, offset, gpch->regbase + GPIOFUNC); /* set the initial output value */ rb532_set_bit(value, offset, gpch->regbase + GPIOD); @@ -233,10 +174,11 @@ EXPORT_SYMBOL(rb532_gpio_set_istat); /* * Configure GPIO alternate function */ -static void rb532_gpio_set_func(int bit, unsigned gpio) +void rb532_gpio_set_func(unsigned gpio) { - rb532_set_bit(bit, gpio, rb532_gpio_chip->regbase + GPIOFUNC); + rb532_set_bit(1, gpio, rb532_gpio_chip->regbase + GPIOFUNC); } +EXPORT_SYMBOL(rb532_gpio_set_func); int __init rb532_gpio_init(void) { @@ -253,20 +195,6 @@ int __init rb532_gpio_init(void) /* Register our GPIO chip */ gpiochip_add(&rb532_gpio_chip->chip); - r = rb532_dev3_ctl_res; - dev3.base = 
ioremap_nocache(r->start, r->end - r->start); - - if (!dev3.base) { - printk(KERN_ERR "rb532: cannot remap device controller 3\n"); - return -ENXIO; - } - - /* configure CF_GPIO_NUM as CFRDY IRQ source */ - rb532_gpio_set_func(0, CF_GPIO_NUM); - rb532_gpio_direction_input(&rb532_gpio_chip->chip, CF_GPIO_NUM); - rb532_gpio_set_ilevel(1, CF_GPIO_NUM); - rb532_gpio_set_istat(0, CF_GPIO_NUM); - return 0; } arch_initcall(rb532_gpio_init); diff --git a/arch/mips/rb532/irq.c b/arch/mips/rb532/irq.c index 549b46d2fcee..53eeb5e7bc5b 100644 --- a/arch/mips/rb532/irq.c +++ b/arch/mips/rb532/irq.c @@ -46,6 +46,7 @@ #include <asm/system.h> #include <asm/mach-rc32434/irq.h> +#include <asm/mach-rc32434/gpio.h> struct intr_group { u32 mask; /* mask of valid bits in pending/mask registers */ @@ -150,6 +151,9 @@ static void rb532_disable_irq(unsigned int irq_nr) mask |= intr_bit; WRITE_MASK(addr, mask); + if (group == GPIO_MAPPED_IRQ_GROUP) + rb532_gpio_set_istat(0, irq_nr - GPIO_MAPPED_IRQ_BASE); + /* * if there are no more interrupts enabled in this * group, disable corresponding IP @@ -165,12 +169,35 @@ static void rb532_mask_and_ack_irq(unsigned int irq_nr) ack_local_irq(group_to_ip(irq_to_group(irq_nr))); } +static int rb532_set_type(unsigned int irq_nr, unsigned type) +{ + int gpio = irq_nr - GPIO_MAPPED_IRQ_BASE; + int group = irq_to_group(irq_nr); + + if (group != GPIO_MAPPED_IRQ_GROUP) + return (type == IRQ_TYPE_LEVEL_HIGH) ? 0 : -EINVAL; + + switch (type) { + case IRQ_TYPE_LEVEL_HIGH: + rb532_gpio_set_ilevel(1, gpio); + break; + case IRQ_TYPE_LEVEL_LOW: + rb532_gpio_set_ilevel(0, gpio); + break; + default: + return -EINVAL; + } + + return 0; +} + static struct irq_chip rc32434_irq_type = { .name = "RB532", .ack = rb532_disable_irq, .mask = rb532_disable_irq, .mask_ack = rb532_mask_and_ack_irq, .unmask = rb532_enable_irq, + .set_type = rb532_set_type, }; void __init arch_init_irq(void) diff --git a/arch/mips/rb532/serial.c b/arch/mips/rb532/serial.c index 3e0d7ec3a579..00ed19f0bdb5 100644 --- a/arch/mips/rb532/serial.c +++ b/arch/mips/rb532/serial.c @@ -36,7 +36,7 @@ extern unsigned int idt_cpu_freq; static struct uart_port rb532_uart = { - .type = PORT_16550A, + .flags = UPF_BOOT_AUTOCONF, .line = 0, .irq = UART0_IRQ, .iotype = UPIO_MEM, diff --git a/arch/mips/txx9/generic/setup_tx4939.c b/arch/mips/txx9/generic/setup_tx4939.c index 6c0049a5bbc1..55440967b3a8 100644 --- a/arch/mips/txx9/generic/setup_tx4939.c +++ b/arch/mips/txx9/generic/setup_tx4939.c @@ -435,6 +435,28 @@ void __init tx4939_ata_init(void) platform_device_register(&ata1_dev); } +void __init tx4939_rtc_init(void) +{ + static struct resource res[] = { + { + .start = TX4939_RTC_REG & 0xfffffffffULL, + .end = (TX4939_RTC_REG & 0xfffffffffULL) + 0x100 - 1, + .flags = IORESOURCE_MEM, + }, { + .start = TXX9_IRQ_BASE + TX4939_IR_RTC, + .flags = IORESOURCE_IRQ, + }, + }; + static struct platform_device rtc_dev = { + .name = "tx4939rtc", + .id = -1, + .num_resources = ARRAY_SIZE(res), + .resource = res, + }; + + platform_device_register(&rtc_dev); +} + static void __init tx4939_stop_unused_modules(void) { __u64 pcfg, rst = 0, ckd = 0; diff --git a/arch/mips/txx9/rbtx4939/setup.c b/arch/mips/txx9/rbtx4939/setup.c index 98fbd9391bf8..656603b85b71 100644 --- a/arch/mips/txx9/rbtx4939/setup.c +++ b/arch/mips/txx9/rbtx4939/setup.c @@ -336,6 +336,7 @@ static void __init rbtx4939_device_init(void) rbtx4939_led_setup(); tx4939_wdt_init(); tx4939_ata_init(); + tx4939_rtc_init(); } static void __init rbtx4939_setup(void) diff --git 
a/drivers/char/selection.c b/drivers/char/selection.c index f29fbe9b8ed7..cb8ca5698963 100644 --- a/drivers/char/selection.c +++ b/drivers/char/selection.c @@ -268,7 +268,7 @@ int set_selection(const struct tiocl_selection __user *sel, struct tty_struct *t /* Allocate a new buffer before freeing the old one ... */ multiplier = use_unicode ? 3 : 1; /* chars can take up to 3 bytes */ - bp = kmalloc((sel_end-sel_start)/2*multiplier+1, GFP_KERNEL); + bp = kmalloc(((sel_end-sel_start)/2+1)*multiplier, GFP_KERNEL); if (!bp) { printk(KERN_WARNING "selection: kmalloc() failed\n"); clear_selection(); diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 9da581452874..6915fb82d0b0 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -136,7 +136,7 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size) obj = kcalloc(1, sizeof(*obj), GFP_KERNEL); obj->dev = dev; - obj->filp = shmem_file_setup("drm mm object", size, 0); + obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE); if (IS_ERR(obj->filp)) { kfree(obj); return NULL; diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c index f5e2e7235fcb..13ca73f96ec6 100644 --- a/drivers/net/igb/e1000_82575.c +++ b/drivers/net/igb/e1000_82575.c @@ -699,11 +699,18 @@ static s32 igb_check_for_link_82575(struct e1000_hw *hw) /* SGMII link check is done through the PCS register. */ if ((hw->phy.media_type != e1000_media_type_copper) || - (igb_sgmii_active_82575(hw))) + (igb_sgmii_active_82575(hw))) { ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed, &duplex); - else + /* + * Use this flag to determine if link needs to be checked or + * not. If we have link clear the flag so that we do not + * continue to check for link. + */ + hw->mac.get_link_status = !hw->mac.serdes_has_link; + } else { ret_val = igb_check_for_copper_link(hw); + } return ret_val; } diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h index 5a27825cc48a..aebef8e48e76 100644 --- a/drivers/net/igb/igb.h +++ b/drivers/net/igb/igb.h @@ -300,11 +300,10 @@ struct igb_adapter { #define IGB_FLAG_HAS_MSI (1 << 0) #define IGB_FLAG_MSI_ENABLE (1 << 1) -#define IGB_FLAG_HAS_DCA (1 << 2) -#define IGB_FLAG_DCA_ENABLED (1 << 3) -#define IGB_FLAG_IN_NETPOLL (1 << 5) -#define IGB_FLAG_QUAD_PORT_A (1 << 6) -#define IGB_FLAG_NEED_CTX_IDX (1 << 7) +#define IGB_FLAG_DCA_ENABLED (1 << 2) +#define IGB_FLAG_IN_NETPOLL (1 << 3) +#define IGB_FLAG_QUAD_PORT_A (1 << 4) +#define IGB_FLAG_NEED_CTX_IDX (1 << 5) enum e1000_state_t { __IGB_TESTING, diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c index b82b0fb2056c..a50db5398fa5 100644 --- a/drivers/net/igb/igb_main.c +++ b/drivers/net/igb/igb_main.c @@ -206,10 +206,11 @@ static int __init igb_init_module(void) global_quad_port_a = 0; - ret = pci_register_driver(&igb_driver); #ifdef CONFIG_IGB_DCA dca_register_notify(&dca_notifier); #endif + + ret = pci_register_driver(&igb_driver); return ret; } @@ -1156,11 +1157,10 @@ static int __devinit igb_probe(struct pci_dev *pdev, /* set flags */ switch (hw->mac.type) { - case e1000_82576: case e1000_82575: - adapter->flags |= IGB_FLAG_HAS_DCA; adapter->flags |= IGB_FLAG_NEED_CTX_IDX; break; + case e1000_82576: default: break; } @@ -1310,8 +1310,7 @@ static int __devinit igb_probe(struct pci_dev *pdev, goto err_register; #ifdef CONFIG_IGB_DCA - if ((adapter->flags & IGB_FLAG_HAS_DCA) && - (dca_add_requester(&pdev->dev) == 0)) { + if (dca_add_requester(&pdev->dev) == 0) { adapter->flags |= IGB_FLAG_DCA_ENABLED; 
dev_info(&pdev->dev, "DCA enabled\n"); /* Always use CB2 mode, difference is masked @@ -1835,11 +1834,11 @@ static void igb_setup_rctl(struct igb_adapter *adapter) rctl |= E1000_RCTL_SECRC; /* - * disable store bad packets, long packet enable, and clear size bits. + * disable store bad packets and clear size bits. */ - rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_LPE | E1000_RCTL_SZ_256); + rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256); - if (adapter->netdev->mtu > ETH_DATA_LEN) + /* enable LPE when to prevent packets larger than max_frame_size */ rctl |= E1000_RCTL_LPE; /* Setup buffer sizes */ @@ -1865,7 +1864,7 @@ static void igb_setup_rctl(struct igb_adapter *adapter) */ /* allocations using alloc_page take too long for regular MTU * so only enable packet split for jumbo frames */ - if (rctl & E1000_RCTL_LPE) { + if (adapter->netdev->mtu > ETH_DATA_LEN) { adapter->rx_ps_hdr_size = IGB_RXBUFFER_128; srrctl |= adapter->rx_ps_hdr_size << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT; @@ -3473,19 +3472,16 @@ static int __igb_notify_dca(struct device *dev, void *data) struct e1000_hw *hw = &adapter->hw; unsigned long event = *(unsigned long *)data; - if (!(adapter->flags & IGB_FLAG_HAS_DCA)) - goto out; - switch (event) { case DCA_PROVIDER_ADD: /* if already enabled, don't do it again */ if (adapter->flags & IGB_FLAG_DCA_ENABLED) break; - adapter->flags |= IGB_FLAG_DCA_ENABLED; /* Always use CB2 mode, difference is masked * in the CB driver. */ wr32(E1000_DCA_CTRL, 2); if (dca_add_requester(dev) == 0) { + adapter->flags |= IGB_FLAG_DCA_ENABLED; dev_info(&adapter->pdev->dev, "DCA enabled\n"); igb_setup_dca(adapter); break; @@ -3502,7 +3498,7 @@ static int __igb_notify_dca(struct device *dev, void *data) } break; } -out: + return 0; } diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c index 7673fd92eaf5..ab0e09bf154d 100644 --- a/drivers/net/sfc/efx.c +++ b/drivers/net/sfc/efx.c @@ -676,9 +676,8 @@ static int efx_init_port(struct efx_nic *efx) rc = efx->phy_op->init(efx); if (rc) return rc; - efx->phy_op->reconfigure(efx); - mutex_lock(&efx->mac_lock); + efx->phy_op->reconfigure(efx); rc = falcon_switch_mac(efx); mutex_unlock(&efx->mac_lock); if (rc) @@ -686,7 +685,7 @@ static int efx_init_port(struct efx_nic *efx) efx->mac_op->reconfigure(efx); efx->port_initialized = true; - efx->stats_enabled = true; + efx_stats_enable(efx); return 0; fail: @@ -735,6 +734,7 @@ static void efx_fini_port(struct efx_nic *efx) if (!efx->port_initialized) return; + efx_stats_disable(efx); efx->phy_op->fini(efx); efx->port_initialized = false; @@ -1361,6 +1361,20 @@ static int efx_net_stop(struct net_device *net_dev) return 0; } +void efx_stats_disable(struct efx_nic *efx) +{ + spin_lock(&efx->stats_lock); + ++efx->stats_disable_count; + spin_unlock(&efx->stats_lock); +} + +void efx_stats_enable(struct efx_nic *efx) +{ + spin_lock(&efx->stats_lock); + --efx->stats_disable_count; + spin_unlock(&efx->stats_lock); +} + /* Context: process, dev_base_lock or RTNL held, non-blocking. */ static struct net_device_stats *efx_net_stats(struct net_device *net_dev) { @@ -1369,12 +1383,12 @@ static struct net_device_stats *efx_net_stats(struct net_device *net_dev) struct net_device_stats *stats = &net_dev->stats; /* Update stats if possible, but do not wait if another thread - * is updating them (or resetting the NIC); slightly stale - * stats are acceptable. + * is updating them or if MAC stats fetches are temporarily + * disabled; slightly stale stats are acceptable. 
*/ if (!spin_trylock(&efx->stats_lock)) return stats; - if (efx->stats_enabled) { + if (!efx->stats_disable_count) { efx->mac_op->update_stats(efx); falcon_update_nic_stats(efx); } @@ -1622,16 +1636,12 @@ static void efx_unregister_netdev(struct efx_nic *efx) /* Tears down the entire software state and most of the hardware state * before reset. */ -void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd) +void efx_reset_down(struct efx_nic *efx, enum reset_type method, + struct ethtool_cmd *ecmd) { EFX_ASSERT_RESET_SERIALISED(efx); - /* The net_dev->get_stats handler is quite slow, and will fail - * if a fetch is pending over reset. Serialise against it. */ - spin_lock(&efx->stats_lock); - efx->stats_enabled = false; - spin_unlock(&efx->stats_lock); - + efx_stats_disable(efx); efx_stop_all(efx); mutex_lock(&efx->mac_lock); mutex_lock(&efx->spi_lock); @@ -1639,6 +1649,8 @@ void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd) efx->phy_op->get_settings(efx, ecmd); efx_fini_channels(efx); + if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) + efx->phy_op->fini(efx); } /* This function will always ensure that the locks acquired in @@ -1646,7 +1658,8 @@ void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd) * that we were unable to reinitialise the hardware, and the * driver should be disabled. If ok is false, then the rx and tx * engines are not restarted, pending a RESET_DISABLE. */ -int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd, bool ok) +int efx_reset_up(struct efx_nic *efx, enum reset_type method, + struct ethtool_cmd *ecmd, bool ok) { int rc; @@ -1658,6 +1671,15 @@ int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd, bool ok) ok = false; } + if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) { + if (ok) { + rc = efx->phy_op->init(efx); + if (rc) + ok = false; + } else + efx->port_initialized = false; + } + if (ok) { efx_init_channels(efx); @@ -1670,7 +1692,7 @@ int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd, bool ok) if (ok) { efx_start_all(efx); - efx->stats_enabled = true; + efx_stats_enable(efx); } return rc; } @@ -1702,7 +1724,7 @@ static int efx_reset(struct efx_nic *efx) EFX_INFO(efx, "resetting (%d)\n", method); - efx_reset_down(efx, &ecmd); + efx_reset_down(efx, method, &ecmd); rc = falcon_reset_hw(efx, method); if (rc) { @@ -1721,10 +1743,10 @@ static int efx_reset(struct efx_nic *efx) /* Leave device stopped if necessary */ if (method == RESET_TYPE_DISABLE) { - efx_reset_up(efx, &ecmd, false); + efx_reset_up(efx, method, &ecmd, false); rc = -EIO; } else { - rc = efx_reset_up(efx, &ecmd, true); + rc = efx_reset_up(efx, method, &ecmd, true); } out_disable: @@ -1876,6 +1898,7 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type, efx->rx_checksum_enabled = true; spin_lock_init(&efx->netif_stop_lock); spin_lock_init(&efx->stats_lock); + efx->stats_disable_count = 1; mutex_init(&efx->mac_lock); efx->mac_op = &efx_dummy_mac_operations; efx->phy_op = &efx_dummy_phy_operations; diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h index 0dd7a532c78a..55d0f131b0e9 100644 --- a/drivers/net/sfc/efx.h +++ b/drivers/net/sfc/efx.h @@ -36,13 +36,16 @@ extern void efx_process_channel_now(struct efx_channel *channel); extern void efx_flush_queues(struct efx_nic *efx); /* Ports */ +extern void efx_stats_disable(struct efx_nic *efx); +extern void efx_stats_enable(struct efx_nic *efx); extern void efx_reconfigure_port(struct efx_nic *efx); extern void 
__efx_reconfigure_port(struct efx_nic *efx); /* Reset handling */ -extern void efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd); -extern int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd, - bool ok); +extern void efx_reset_down(struct efx_nic *efx, enum reset_type method, + struct ethtool_cmd *ecmd); +extern int efx_reset_up(struct efx_nic *efx, enum reset_type method, + struct ethtool_cmd *ecmd, bool ok); /* Global */ extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type); diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c index 53d259e90187..7b5924c039b3 100644 --- a/drivers/net/sfc/ethtool.c +++ b/drivers/net/sfc/ethtool.c @@ -219,9 +219,6 @@ int efx_ethtool_set_settings(struct net_device *net_dev, struct efx_nic *efx = netdev_priv(net_dev); int rc; - if (EFX_WORKAROUND_13963(efx) && !ecmd->autoneg) - return -EINVAL; - /* Falcon GMAC does not support 1000Mbps HD */ if (ecmd->speed == SPEED_1000 && ecmd->duplex != DUPLEX_FULL) { EFX_LOG(efx, "rejecting unsupported 1000Mbps HD" diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c index 5b9f2d9cc4ed..d5378e60fcdd 100644 --- a/drivers/net/sfc/falcon.c +++ b/drivers/net/sfc/falcon.c @@ -824,10 +824,6 @@ static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue, rx_ev_pause_frm ? " [PAUSE]" : ""); } #endif - - if (unlikely(rx_ev_eth_crc_err && EFX_WORKAROUND_10750(efx) && - efx->phy_type == PHY_TYPE_SFX7101)) - tenxpress_crc_err(efx); } /* Handle receive events that are not in-order. */ @@ -1887,7 +1883,7 @@ static int falcon_reset_macs(struct efx_nic *efx) /* MAC stats will fail whilst the TX fifo is draining. Serialise * the drain sequence with the statistics fetch */ - spin_lock(&efx->stats_lock); + efx_stats_disable(efx); falcon_read(efx, ®, MAC0_CTRL_REG_KER); EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0, 1); @@ -1917,7 +1913,7 @@ static int falcon_reset_macs(struct efx_nic *efx) udelay(10); } - spin_unlock(&efx->stats_lock); + efx_stats_enable(efx); /* If we've reset the EM block and the link is up, then * we'll have to kick the XAUI link so the PHY can recover */ @@ -2277,6 +2273,10 @@ int falcon_switch_mac(struct efx_nic *efx) struct efx_mac_operations *old_mac_op = efx->mac_op; efx_oword_t nic_stat; unsigned strap_val; + int rc = 0; + + /* Don't try to fetch MAC stats while we're switching MACs */ + efx_stats_disable(efx); /* Internal loopbacks override the phy speed setting */ if (efx->loopback_mode == LOOPBACK_GMAC) { @@ -2287,16 +2287,12 @@ int falcon_switch_mac(struct efx_nic *efx) efx->link_fd = true; } + WARN_ON(!mutex_is_locked(&efx->mac_lock)); efx->mac_op = (EFX_IS10G(efx) ? &falcon_xmac_operations : &falcon_gmac_operations); - if (old_mac_op == efx->mac_op) - return 0; - - WARN_ON(!mutex_is_locked(&efx->mac_lock)); - - /* Not all macs support a mac-level link state */ - efx->mac_up = true; + /* Always push the NIC_STAT_REG setting even if the mac hasn't + * changed, because this function is run post online reset */ falcon_read(efx, &nic_stat, NIC_STAT_REG); strap_val = EFX_IS10G(efx) ? 5 : 3; if (falcon_rev(efx) >= FALCON_REV_B0) { @@ -2309,9 +2305,17 @@ int falcon_switch_mac(struct efx_nic *efx) BUG_ON(EFX_OWORD_FIELD(nic_stat, STRAP_PINS) != strap_val); } + if (old_mac_op == efx->mac_op) + goto out; EFX_LOG(efx, "selected %cMAC\n", EFX_IS10G(efx) ? 
'X' : 'G'); - return falcon_reset_macs(efx); + /* Not all macs support a mac-level link state */ + efx->mac_up = true; + + rc = falcon_reset_macs(efx); +out: + efx_stats_enable(efx); + return rc; } /* This call is responsible for hooking in the MAC and PHY operations */ diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c index f6a16428113d..f9e2f95c3b48 100644 --- a/drivers/net/sfc/mdio_10g.c +++ b/drivers/net/sfc/mdio_10g.c @@ -15,6 +15,7 @@ #include "net_driver.h" #include "mdio_10g.h" #include "boards.h" +#include "workarounds.h" int mdio_clause45_reset_mmd(struct efx_nic *port, int mmd, int spins, int spintime) @@ -179,17 +180,12 @@ bool mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask) return false; else if (efx_phy_mode_disabled(efx->phy_mode)) return false; - else if (efx->loopback_mode == LOOPBACK_PHYXS) { + else if (efx->loopback_mode == LOOPBACK_PHYXS) mmd_mask &= ~(MDIO_MMDREG_DEVS_PHYXS | MDIO_MMDREG_DEVS_PCS | MDIO_MMDREG_DEVS_PMAPMD | MDIO_MMDREG_DEVS_AN); - if (!mmd_mask) { - reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PHYXS, - MDIO_PHYXS_STATUS2); - return !(reg & (1 << MDIO_PHYXS_STATUS2_RX_FAULT_LBN)); - } - } else if (efx->loopback_mode == LOOPBACK_PCS) + else if (efx->loopback_mode == LOOPBACK_PCS) mmd_mask &= ~(MDIO_MMDREG_DEVS_PCS | MDIO_MMDREG_DEVS_PMAPMD | MDIO_MMDREG_DEVS_AN); @@ -197,6 +193,13 @@ bool mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask) mmd_mask &= ~(MDIO_MMDREG_DEVS_PMAPMD | MDIO_MMDREG_DEVS_AN); + if (!mmd_mask) { + /* Use presence of XGMII faults in leui of link state */ + reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PHYXS, + MDIO_PHYXS_STATUS2); + return !(reg & (1 << MDIO_PHYXS_STATUS2_RX_FAULT_LBN)); + } + while (mmd_mask) { if (mmd_mask & 1) { /* Double reads because link state is latched, and a @@ -263,7 +266,7 @@ void mdio_clause45_set_mmds_lpower(struct efx_nic *efx, } } -static u32 mdio_clause45_get_an(struct efx_nic *efx, u16 addr, u32 xnp) +static u32 mdio_clause45_get_an(struct efx_nic *efx, u16 addr) { int phy_id = efx->mii.phy_id; u32 result = 0; @@ -278,9 +281,6 @@ static u32 mdio_clause45_get_an(struct efx_nic *efx, u16 addr, u32 xnp) result |= ADVERTISED_100baseT_Half; if (reg & ADVERTISE_100FULL) result |= ADVERTISED_100baseT_Full; - if (reg & LPA_RESV) - result |= xnp; - return result; } @@ -310,7 +310,7 @@ void mdio_clause45_get_settings(struct efx_nic *efx, */ void mdio_clause45_get_settings_ext(struct efx_nic *efx, struct ethtool_cmd *ecmd, - u32 xnp, u32 xnp_lpa) + u32 npage_adv, u32 npage_lpa) { int phy_id = efx->mii.phy_id; int reg; @@ -361,8 +361,8 @@ void mdio_clause45_get_settings_ext(struct efx_nic *efx, ecmd->autoneg = AUTONEG_ENABLE; ecmd->advertising |= ADVERTISED_Autoneg | - mdio_clause45_get_an(efx, - MDIO_AN_ADVERTISE, xnp); + mdio_clause45_get_an(efx, MDIO_AN_ADVERTISE) | + npage_adv; } else ecmd->autoneg = AUTONEG_DISABLE; } else @@ -371,27 +371,30 @@ void mdio_clause45_get_settings_ext(struct efx_nic *efx, if (ecmd->autoneg) { /* If AN is complete, report best common mode, * otherwise report best advertised mode. 
*/ - u32 common = ecmd->advertising; + u32 modes = 0; if (mdio_clause45_read(efx, phy_id, MDIO_MMD_AN, MDIO_MMDREG_STAT1) & - (1 << MDIO_AN_STATUS_AN_DONE_LBN)) { - common &= mdio_clause45_get_an(efx, MDIO_AN_LPA, - xnp_lpa); - } - if (common & ADVERTISED_10000baseT_Full) { + (1 << MDIO_AN_STATUS_AN_DONE_LBN)) + modes = (ecmd->advertising & + (mdio_clause45_get_an(efx, MDIO_AN_LPA) | + npage_lpa)); + if (modes == 0) + modes = ecmd->advertising; + + if (modes & ADVERTISED_10000baseT_Full) { ecmd->speed = SPEED_10000; ecmd->duplex = DUPLEX_FULL; - } else if (common & (ADVERTISED_1000baseT_Full | - ADVERTISED_1000baseT_Half)) { + } else if (modes & (ADVERTISED_1000baseT_Full | + ADVERTISED_1000baseT_Half)) { ecmd->speed = SPEED_1000; - ecmd->duplex = !!(common & ADVERTISED_1000baseT_Full); - } else if (common & (ADVERTISED_100baseT_Full | - ADVERTISED_100baseT_Half)) { + ecmd->duplex = !!(modes & ADVERTISED_1000baseT_Full); + } else if (modes & (ADVERTISED_100baseT_Full | + ADVERTISED_100baseT_Half)) { ecmd->speed = SPEED_100; - ecmd->duplex = !!(common & ADVERTISED_100baseT_Full); + ecmd->duplex = !!(modes & ADVERTISED_100baseT_Full); } else { ecmd->speed = SPEED_10; - ecmd->duplex = !!(common & ADVERTISED_10baseT_Full); + ecmd->duplex = !!(modes & ADVERTISED_10baseT_Full); } } else { /* Report forced settings */ @@ -415,7 +418,7 @@ int mdio_clause45_set_settings(struct efx_nic *efx, int phy_id = efx->mii.phy_id; struct ethtool_cmd prev; u32 required; - int ctrl1_bits, reg; + int reg; efx->phy_op->get_settings(efx, &prev); @@ -430,99 +433,83 @@ int mdio_clause45_set_settings(struct efx_nic *efx, if (prev.port != PORT_TP || ecmd->port != PORT_TP) return -EINVAL; - /* Check that PHY supports these settings and work out the - * basic control bits */ - if (ecmd->duplex) { + /* Check that PHY supports these settings */ + if (ecmd->autoneg) { + required = SUPPORTED_Autoneg; + } else if (ecmd->duplex) { switch (ecmd->speed) { - case SPEED_10: - ctrl1_bits = BMCR_FULLDPLX; - required = SUPPORTED_10baseT_Full; - break; - case SPEED_100: - ctrl1_bits = BMCR_SPEED100 | BMCR_FULLDPLX; - required = SUPPORTED_100baseT_Full; - break; - case SPEED_1000: - ctrl1_bits = BMCR_SPEED1000 | BMCR_FULLDPLX; - required = SUPPORTED_1000baseT_Full; - break; - case SPEED_10000: - ctrl1_bits = (BMCR_SPEED1000 | BMCR_SPEED100 | - BMCR_FULLDPLX); - required = SUPPORTED_10000baseT_Full; - break; - default: - return -EINVAL; + case SPEED_10: required = SUPPORTED_10baseT_Full; break; + case SPEED_100: required = SUPPORTED_100baseT_Full; break; + default: return -EINVAL; } } else { switch (ecmd->speed) { - case SPEED_10: - ctrl1_bits = 0; - required = SUPPORTED_10baseT_Half; - break; - case SPEED_100: - ctrl1_bits = BMCR_SPEED100; - required = SUPPORTED_100baseT_Half; - break; - case SPEED_1000: - ctrl1_bits = BMCR_SPEED1000; - required = SUPPORTED_1000baseT_Half; - break; - default: - return -EINVAL; + case SPEED_10: required = SUPPORTED_10baseT_Half; break; + case SPEED_100: required = SUPPORTED_100baseT_Half; break; + default: return -EINVAL; } } - if (ecmd->autoneg) - required |= SUPPORTED_Autoneg; required |= ecmd->advertising; if (required & ~prev.supported) return -EINVAL; - /* Set the basic control bits */ - reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD, - MDIO_MMDREG_CTRL1); - reg &= ~(BMCR_SPEED1000 | BMCR_SPEED100 | BMCR_FULLDPLX | 0x003c); - reg |= ctrl1_bits; - mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD, MDIO_MMDREG_CTRL1, - reg); - - /* Set the AN registers */ - if (ecmd->autoneg != prev.autoneg || 
- ecmd->advertising != prev.advertising) { - bool xnp = false; - - if (efx->phy_op->set_xnp_advertise) - xnp = efx->phy_op->set_xnp_advertise(efx, - ecmd->advertising); - - if (ecmd->autoneg) { - reg = 0; - if (ecmd->advertising & ADVERTISED_10baseT_Half) - reg |= ADVERTISE_10HALF; - if (ecmd->advertising & ADVERTISED_10baseT_Full) - reg |= ADVERTISE_10FULL; - if (ecmd->advertising & ADVERTISED_100baseT_Half) - reg |= ADVERTISE_100HALF; - if (ecmd->advertising & ADVERTISED_100baseT_Full) - reg |= ADVERTISE_100FULL; - if (xnp) - reg |= ADVERTISE_RESV; - mdio_clause45_write(efx, phy_id, MDIO_MMD_AN, - MDIO_AN_ADVERTISE, reg); - } + if (ecmd->autoneg) { + bool xnp = (ecmd->advertising & ADVERTISED_10000baseT_Full + || EFX_WORKAROUND_13204(efx)); + + /* Set up the base page */ + reg = ADVERTISE_CSMA; + if (ecmd->advertising & ADVERTISED_10baseT_Half) + reg |= ADVERTISE_10HALF; + if (ecmd->advertising & ADVERTISED_10baseT_Full) + reg |= ADVERTISE_10FULL; + if (ecmd->advertising & ADVERTISED_100baseT_Half) + reg |= ADVERTISE_100HALF; + if (ecmd->advertising & ADVERTISED_100baseT_Full) + reg |= ADVERTISE_100FULL; + if (xnp) + reg |= ADVERTISE_RESV; + else if (ecmd->advertising & (ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full)) + reg |= ADVERTISE_NPAGE; + reg |= efx_fc_advertise(efx->wanted_fc); + mdio_clause45_write(efx, phy_id, MDIO_MMD_AN, + MDIO_AN_ADVERTISE, reg); + + /* Set up the (extended) next page if necessary */ + if (efx->phy_op->set_npage_adv) + efx->phy_op->set_npage_adv(efx, ecmd->advertising); + /* Enable and restart AN */ reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN, MDIO_MMDREG_CTRL1); - if (ecmd->autoneg) - reg |= BMCR_ANENABLE | BMCR_ANRESTART; - else - reg &= ~BMCR_ANENABLE; + reg |= BMCR_ANENABLE; + if (!(EFX_WORKAROUND_15195(efx) && + LOOPBACK_MASK(efx) & efx->phy_op->loopbacks)) + reg |= BMCR_ANRESTART; if (xnp) reg |= 1 << MDIO_AN_CTRL_XNP_LBN; else reg &= ~(1 << MDIO_AN_CTRL_XNP_LBN); mdio_clause45_write(efx, phy_id, MDIO_MMD_AN, MDIO_MMDREG_CTRL1, reg); + } else { + /* Disable AN */ + mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_AN, + MDIO_MMDREG_CTRL1, + __ffs(BMCR_ANENABLE), false); + + /* Set the basic control bits */ + reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD, + MDIO_MMDREG_CTRL1); + reg &= ~(BMCR_SPEED1000 | BMCR_SPEED100 | BMCR_FULLDPLX | + 0x003c); + if (ecmd->speed == SPEED_100) + reg |= BMCR_SPEED100; + if (ecmd->duplex) + reg |= BMCR_FULLDPLX; + mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD, + MDIO_MMDREG_CTRL1, reg); } return 0; diff --git a/drivers/net/sfc/mdio_10g.h b/drivers/net/sfc/mdio_10g.h index 09bf801d0569..8ba49773ce7e 100644 --- a/drivers/net/sfc/mdio_10g.h +++ b/drivers/net/sfc/mdio_10g.h @@ -155,7 +155,8 @@ #define MDIO_AN_XNP 22 #define MDIO_AN_LPA_XNP 25 -#define MDIO_AN_10GBT_ADVERTISE 32 +#define MDIO_AN_10GBT_CTRL 32 +#define MDIO_AN_10GBT_CTRL_ADV_10G_LBN 12 #define MDIO_AN_10GBT_STATUS (33) #define MDIO_AN_10GBT_STATUS_MS_FLT_LBN (15) /* MASTER/SLAVE config fault */ #define MDIO_AN_10GBT_STATUS_MS_LBN (14) /* MASTER/SLAVE config */ diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h index 5f255f75754e..e019ad1fb9a0 100644 --- a/drivers/net/sfc/net_driver.h +++ b/drivers/net/sfc/net_driver.h @@ -566,7 +566,7 @@ struct efx_mac_operations { * @poll: Poll for hardware state. Serialised by the mac_lock. * @get_settings: Get ethtool settings. Serialised by the mac_lock. * @set_settings: Set ethtool settings. Serialised by the mac_lock. 
- * @set_xnp_advertise: Set abilities advertised in Extended Next Page
+ * @set_npage_adv: Set abilities advertised in (Extended) Next Page
 *	(only needed where AN bit is set in mmds)
 * @num_tests: Number of PHY-specific tests/results
 * @test_names: Names of the tests/results
@@ -586,7 +586,7 @@ struct efx_phy_operations {
 			      struct ethtool_cmd *ecmd);
 	int (*set_settings) (struct efx_nic *efx,
 			     struct ethtool_cmd *ecmd);
-	bool (*set_xnp_advertise) (struct efx_nic *efx, u32);
+	void (*set_npage_adv) (struct efx_nic *efx, u32);
 	u32 num_tests;
 	const char *const *test_names;
 	int (*run_tests) (struct efx_nic *efx, int *results, unsigned flags);
@@ -754,8 +754,7 @@ union efx_multicast_hash {
 *	&struct net_device_stats.
 * @stats_buffer: DMA buffer for statistics
 * @stats_lock: Statistics update lock. Serialises statistics fetches
- * @stats_enabled: Temporarily disable statistics fetches.
- *	Serialised by @stats_lock
+ * @stats_disable_count: Nest count for disabling statistics fetches
 * @mac_op: MAC interface
 * @mac_address: Permanent MAC address
 * @phy_type: PHY type
@@ -837,7 +836,7 @@ struct efx_nic {
 	struct efx_mac_stats mac_stats;
 	struct efx_buffer stats_buffer;
 	spinlock_t stats_lock;
-	bool stats_enabled;
+	unsigned int stats_disable_count;
 
 	struct efx_mac_operations *mac_op;
 	unsigned char mac_address[ETH_ALEN];
diff --git a/drivers/net/sfc/phy.h b/drivers/net/sfc/phy.h
index 58c493ef81bb..07e855c148bc 100644
--- a/drivers/net/sfc/phy.h
+++ b/drivers/net/sfc/phy.h
@@ -17,7 +17,6 @@ extern struct efx_phy_operations falcon_sfx7101_phy_ops;
 extern struct efx_phy_operations falcon_sft9001_phy_ops;
 
 extern void tenxpress_phy_blink(struct efx_nic *efx, bool blink);
-extern void tenxpress_crc_err(struct efx_nic *efx);
 
 /****************************************************************************
  * Exported functions from the driver for XFP optical PHYs
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index dba0d64d50cd..0a598084c513 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -665,6 +665,7 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
 {
 	enum efx_loopback_mode loopback_mode = efx->loopback_mode;
 	int phy_mode = efx->phy_mode;
+	enum reset_type reset_method = RESET_TYPE_INVISIBLE;
 	struct ethtool_cmd ecmd;
 	struct efx_channel *channel;
 	int rc_test = 0, rc_reset = 0, rc;
@@ -718,21 +719,21 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
 	mutex_unlock(&efx->mac_lock);
 
 	/* free up all consumers of SRAM (including all the queues) */
-	efx_reset_down(efx, &ecmd);
+	efx_reset_down(efx, reset_method, &ecmd);
 
 	rc = efx_test_chip(efx, tests);
 	if (rc && !rc_test)
 		rc_test = rc;
 
 	/* reset the chip to recover from the register test */
-	rc_reset = falcon_reset_hw(efx, RESET_TYPE_ALL);
+	rc_reset = falcon_reset_hw(efx, reset_method);
 
 	/* Ensure that the phy is powered and out of loopback
 	 * for the bist and loopback tests */
 	efx->phy_mode &= ~PHY_MODE_LOW_POWER;
 	efx->loopback_mode = LOOPBACK_NONE;
 
-	rc = efx_reset_up(efx, &ecmd, rc_reset == 0);
+	rc = efx_reset_up(efx, reset_method, &ecmd, rc_reset == 0);
 	if (rc && !rc_reset)
 		rc_reset = rc;
 
diff --git a/drivers/net/sfc/sfe4001.c b/drivers/net/sfc/sfe4001.c
index 16b80acb9992..cb25ae5b257a 100644
--- a/drivers/net/sfc/sfe4001.c
+++ b/drivers/net/sfc/sfe4001.c
@@ -186,19 +186,22 @@ static int sfn4111t_reset(struct efx_nic *efx)
 {
 	efx_oword_t reg;
 
-	/* GPIO pins are also used for I2C, so block that temporarily */
+	/* GPIO 3 and the GPIO register are shared with I2C, so block that */
 	mutex_lock(&efx->i2c_adap.bus_lock);
 
+	/* Pull RST_N (GPIO 2) low then let it up again, setting the
+	 * FLASH_CFG_1 strap (GPIO 3) appropriately.  Only change the
+	 * output enables; the output levels should always be 0 (low)
+	 * and we rely on external pull-ups. */
 	falcon_read(efx, &reg, GPIO_CTL_REG_KER);
 	EFX_SET_OWORD_FIELD(reg, GPIO2_OEN, true);
-	EFX_SET_OWORD_FIELD(reg, GPIO2_OUT, false);
 	falcon_write(efx, &reg, GPIO_CTL_REG_KER);
 	msleep(1000);
-	EFX_SET_OWORD_FIELD(reg, GPIO2_OUT, true);
-	EFX_SET_OWORD_FIELD(reg, GPIO3_OEN, true);
-	EFX_SET_OWORD_FIELD(reg, GPIO3_OUT,
-			    !(efx->phy_mode & PHY_MODE_SPECIAL));
+	EFX_SET_OWORD_FIELD(reg, GPIO2_OEN, false);
+	EFX_SET_OWORD_FIELD(reg, GPIO3_OEN,
+			    !!(efx->phy_mode & PHY_MODE_SPECIAL));
 	falcon_write(efx, &reg, GPIO_CTL_REG_KER);
+	msleep(1);
 
 	mutex_unlock(&efx->i2c_adap.bus_lock);
 
@@ -232,12 +235,18 @@ static ssize_t set_phy_flash_cfg(struct device *dev,
 	} else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) {
 		err = -EBUSY;
 	} else {
+		/* Reset the PHY, reconfigure the MAC and enable/disable
+		 * MAC stats accordingly. */
 		efx->phy_mode = new_mode;
+		if (new_mode & PHY_MODE_SPECIAL)
+			efx_stats_disable(efx);
 		if (efx->board_info.type == EFX_BOARD_SFE4001)
 			err = sfe4001_poweron(efx);
 		else
 			err = sfn4111t_reset(efx);
 		efx_reconfigure_port(efx);
+		if (!(new_mode & PHY_MODE_SPECIAL))
+			efx_stats_enable(efx);
 	}
 	rtnl_unlock();
 
@@ -326,6 +335,11 @@ int sfe4001_init(struct efx_nic *efx)
 	efx->board_info.monitor = sfe4001_check_hw;
 	efx->board_info.fini = sfe4001_fini;
 
+	if (efx->phy_mode & PHY_MODE_SPECIAL) {
+		/* PHY won't generate a 156.25 MHz clock and MAC stats fetch
+		 * will fail. */
+		efx_stats_disable(efx);
+	}
 	rc = sfe4001_poweron(efx);
 	if (rc)
 		goto fail_ioexp;
@@ -372,17 +386,25 @@ static void sfn4111t_fini(struct efx_nic *efx)
 	i2c_unregister_device(efx->board_info.hwmon_client);
 }
 
-static struct i2c_board_info sfn4111t_hwmon_info = {
+static struct i2c_board_info sfn4111t_a0_hwmon_info = {
 	I2C_BOARD_INFO("max6647", 0x4e),
 	.irq		= -1,
 };
 
+static struct i2c_board_info sfn4111t_r5_hwmon_info = {
+	I2C_BOARD_INFO("max6646", 0x4d),
+	.irq		= -1,
+};
+
 int sfn4111t_init(struct efx_nic *efx)
 {
 	int rc;
 
 	efx->board_info.hwmon_client =
-		i2c_new_device(&efx->i2c_adap, &sfn4111t_hwmon_info);
+		i2c_new_device(&efx->i2c_adap,
+			       (efx->board_info.minor < 5) ?
+			       &sfn4111t_a0_hwmon_info :
+			       &sfn4111t_r5_hwmon_info);
 	if (!efx->board_info.hwmon_client)
 		return -EIO;
 
@@ -394,8 +416,10 @@ int sfn4111t_init(struct efx_nic *efx)
 	if (rc)
 		goto fail_hwmon;
 
-	if (efx->phy_mode & PHY_MODE_SPECIAL)
+	if (efx->phy_mode & PHY_MODE_SPECIAL) {
+		efx_stats_disable(efx);
 		sfn4111t_reset(efx);
+	}
 
 	return 0;
 
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index 9ecb77da9545..f0efd246962c 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -67,6 +67,8 @@
 #define PMA_PMD_EXT_CLK312_WIDTH	1
 #define PMA_PMD_EXT_LPOWER_LBN		12
 #define PMA_PMD_EXT_LPOWER_WIDTH	1
+#define PMA_PMD_EXT_ROBUST_LBN		14
+#define PMA_PMD_EXT_ROBUST_WIDTH	1
 #define PMA_PMD_EXT_SSR_LBN		15
 #define PMA_PMD_EXT_SSR_WIDTH		1
 
@@ -177,35 +179,24 @@
 #define C22EXT_STATUS_LINK_LBN		2
 #define C22EXT_STATUS_LINK_WIDTH	1
 
-#define C22EXT_MSTSLV_REG		49162
-#define C22EXT_MSTSLV_1000_HD_LBN	10
-#define C22EXT_MSTSLV_1000_HD_WIDTH	1
-#define C22EXT_MSTSLV_1000_FD_LBN	11
-#define C22EXT_MSTSLV_1000_FD_WIDTH	1
+#define C22EXT_MSTSLV_CTRL		49161
+#define C22EXT_MSTSLV_CTRL_ADV_1000_HD_LBN	8
+#define C22EXT_MSTSLV_CTRL_ADV_1000_FD_LBN	9
+
+#define C22EXT_MSTSLV_STATUS		49162
+#define C22EXT_MSTSLV_STATUS_LP_1000_HD_LBN	10
+#define C22EXT_MSTSLV_STATUS_LP_1000_FD_LBN	11
 
 /* Time to wait between powering down the LNPGA and turning off the power
  * rails */
 #define LNPGA_PDOWN_WAIT	(HZ / 5)
 
-static int crc_error_reset_threshold = 100;
-module_param(crc_error_reset_threshold, int, 0644);
-MODULE_PARM_DESC(crc_error_reset_threshold,
-		 "Max number of CRC errors before XAUI reset");
-
 struct tenxpress_phy_data {
 	enum efx_loopback_mode loopback_mode;
-	atomic_t bad_crc_count;
 	enum efx_phy_mode phy_mode;
 	int bad_lp_tries;
 };
 
-void tenxpress_crc_err(struct efx_nic *efx)
-{
-	struct tenxpress_phy_data *phy_data = efx->phy_data;
-	if (phy_data != NULL)
-		atomic_inc(&phy_data->bad_crc_count);
-}
-
 static ssize_t show_phy_short_reach(struct device *dev,
 				    struct device_attribute *attr, char *buf)
 {
@@ -284,7 +275,9 @@
 				    PMA_PMD_XCONTROL_REG);
 	reg |= ((1 << PMA_PMD_EXT_GMII_EN_LBN) |
 		(1 << PMA_PMD_EXT_CLK_OUT_LBN) |
-		(1 << PMA_PMD_EXT_CLK312_LBN));
+		(1 << PMA_PMD_EXT_CLK312_LBN) |
+		(1 << PMA_PMD_EXT_ROBUST_LBN));
+
 	mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD,
 			    PMA_PMD_XCONTROL_REG, reg);
 	mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_C22EXT,
@@ -346,6 +339,7 @@ static int tenxpress_phy_init(struct efx_nic *efx)
 	rc = tenxpress_init(efx);
 	if (rc < 0)
 		goto fail;
+	mdio_clause45_set_pause(efx);
 
 	if (efx->phy_type == PHY_TYPE_SFT9001B) {
 		rc = device_create_file(&efx->pci_dev->dev,
@@ -376,8 +370,8 @@ static int tenxpress_special_reset(struct efx_nic *efx)
 
 	/* The XGMAC clock is driven from the SFC7101/SFT9001 312MHz clock, so
 	 * a special software reset can glitch the XGMAC sufficiently for stats
-	 * requests to fail. Since we don't often special_reset, just lock. */
-	spin_lock(&efx->stats_lock);
+	 * requests to fail. */
+	efx_stats_disable(efx);
 
 	/* Initiate reset */
 	reg = mdio_clause45_read(efx, efx->mii.phy_id,
@@ -392,17 +386,17 @@ static int tenxpress_special_reset(struct efx_nic *efx)
 	rc = mdio_clause45_wait_reset_mmds(efx, TENXPRESS_REQUIRED_DEVS);
 	if (rc < 0)
-		goto unlock;
+		goto out;
 
 	/* Try and reconfigure the device */
 	rc = tenxpress_init(efx);
 	if (rc < 0)
-		goto unlock;
+		goto out;
 
 	/* Wait for the XGXS state machine to churn */
 	mdelay(10);
-unlock:
-	spin_unlock(&efx->stats_lock);
+out:
+	efx_stats_enable(efx);
 	return rc;
 }
 
@@ -520,7 +514,7 @@ static void tenxpress_phy_reconfigure(struct efx_nic *efx)
 {
 	struct tenxpress_phy_data *phy_data = efx->phy_data;
 	struct ethtool_cmd ecmd;
-	bool phy_mode_change, loop_reset, loop_toggle, loopback;
+	bool phy_mode_change, loop_reset;
 
 	if (efx->phy_mode & (PHY_MODE_OFF | PHY_MODE_SPECIAL)) {
 		phy_data->phy_mode = efx->phy_mode;
@@ -531,12 +525,10 @@ static void tenxpress_phy_reconfigure(struct efx_nic *efx)
 	phy_mode_change = (efx->phy_mode == PHY_MODE_NORMAL &&
 			   phy_data->phy_mode != PHY_MODE_NORMAL);
-	loopback = LOOPBACK_MASK(efx) & efx->phy_op->loopbacks;
-	loop_toggle = LOOPBACK_CHANGED(phy_data, efx, efx->phy_op->loopbacks);
 	loop_reset = (LOOPBACK_OUT_OF(phy_data, efx, efx->phy_op->loopbacks) ||
 		      LOOPBACK_CHANGED(phy_data, efx, 1 << LOOPBACK_GPHY));
 
-	if (loop_reset || loop_toggle || loopback || phy_mode_change) {
+	if (loop_reset || phy_mode_change) {
 		int rc;
 
 		efx->phy_op->get_settings(efx, &ecmd);
@@ -551,20 +543,6 @@ static void tenxpress_phy_reconfigure(struct efx_nic *efx)
 			falcon_reset_xaui(efx);
 		}
 
-		if (efx->phy_type != PHY_TYPE_SFX7101) {
-			/* Only change autoneg once, on coming out or
-			 * going into loopback */
-			if (loop_toggle)
-				ecmd.autoneg = !loopback;
-			if (loopback) {
-				ecmd.duplex = DUPLEX_FULL;
-				if (efx->loopback_mode == LOOPBACK_GPHY)
-					ecmd.speed = SPEED_1000;
-				else
-					ecmd.speed = SPEED_10000;
-			}
-		}
-
 		rc = efx->phy_op->set_settings(efx, &ecmd);
 		WARN_ON(rc);
 	}
@@ -623,13 +601,6 @@ static void tenxpress_phy_poll(struct efx_nic *efx)
 
 	if (phy_data->phy_mode != PHY_MODE_NORMAL)
 		return;
-
-	if (EFX_WORKAROUND_10750(efx) &&
-	    atomic_read(&phy_data->bad_crc_count) > crc_error_reset_threshold) {
-		EFX_ERR(efx, "Resetting XAUI due to too many CRC errors\n");
-		falcon_reset_xaui(efx);
-		atomic_set(&phy_data->bad_crc_count, 0);
-	}
 }
 
 static void tenxpress_phy_fini(struct efx_nic *efx)
@@ -772,107 +743,76 @@ reset:
 	return rc;
 }
 
-static u32 tenxpress_get_xnp_lpa(struct efx_nic *efx)
+static void
+tenxpress_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
 {
-	int phy = efx->mii.phy_id;
-	u32 lpa = 0;
+	int phy_id = efx->mii.phy_id;
+	u32 adv = 0, lpa = 0;
 	int reg;
 
 	if (efx->phy_type != PHY_TYPE_SFX7101) {
-		reg = mdio_clause45_read(efx, phy, MDIO_MMD_C22EXT,
-					 C22EXT_MSTSLV_REG);
-		if (reg & (1 << C22EXT_MSTSLV_1000_HD_LBN))
+		reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_C22EXT,
+					 C22EXT_MSTSLV_CTRL);
+		if (reg & (1 << C22EXT_MSTSLV_CTRL_ADV_1000_FD_LBN))
+			adv |= ADVERTISED_1000baseT_Full;
+		reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_C22EXT,
+					 C22EXT_MSTSLV_STATUS);
+		if (reg & (1 << C22EXT_MSTSLV_STATUS_LP_1000_HD_LBN))
 			lpa |= ADVERTISED_1000baseT_Half;
-		if (reg & (1 << C22EXT_MSTSLV_1000_FD_LBN))
+		if (reg & (1 << C22EXT_MSTSLV_STATUS_LP_1000_FD_LBN))
 			lpa |= ADVERTISED_1000baseT_Full;
 	}
-	reg = mdio_clause45_read(efx, phy, MDIO_MMD_AN, MDIO_AN_10GBT_STATUS);
+	reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN,
+				 MDIO_AN_10GBT_CTRL);
+	if (reg & (1 << MDIO_AN_10GBT_CTRL_ADV_10G_LBN))
+		adv |= ADVERTISED_10000baseT_Full;
+	reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN,
+				 MDIO_AN_10GBT_STATUS);
 	if (reg & (1 << MDIO_AN_10GBT_STATUS_LP_10G_LBN))
 		lpa |= ADVERTISED_10000baseT_Full;
-	return lpa;
-}
 
-static void sfx7101_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
-{
-	mdio_clause45_get_settings_ext(efx, ecmd, ADVERTISED_10000baseT_Full,
-				       tenxpress_get_xnp_lpa(efx));
-	ecmd->supported |= SUPPORTED_10000baseT_Full;
-	ecmd->advertising |= ADVERTISED_10000baseT_Full;
+	mdio_clause45_get_settings_ext(efx, ecmd, adv, lpa);
+
+	if (efx->phy_type != PHY_TYPE_SFX7101)
+		ecmd->supported |= (SUPPORTED_100baseT_Full |
+				    SUPPORTED_1000baseT_Full);
+
+	/* In loopback, the PHY automatically brings up the correct interface,
+	 * but doesn't advertise the correct speed. So override it */
+	if (efx->loopback_mode == LOOPBACK_GPHY)
+		ecmd->speed = SPEED_1000;
+	else if (LOOPBACK_MASK(efx) & efx->phy_op->loopbacks)
+		ecmd->speed = SPEED_10000;
 }
 
-static void sft9001_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
+static int tenxpress_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
 {
-	int phy_id = efx->mii.phy_id;
-	u32 xnp_adv = 0;
-	int reg;
-
-	reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
-				 PMA_PMD_SPEED_ENABLE_REG);
-	if (EFX_WORKAROUND_13204(efx) && (reg & (1 << PMA_PMD_100TX_ADV_LBN)))
-		xnp_adv |= ADVERTISED_100baseT_Full;
-	if (reg & (1 << PMA_PMD_1000T_ADV_LBN))
-		xnp_adv |= ADVERTISED_1000baseT_Full;
-	if (reg & (1 << PMA_PMD_10000T_ADV_LBN))
-		xnp_adv |= ADVERTISED_10000baseT_Full;
-
-	mdio_clause45_get_settings_ext(efx, ecmd, xnp_adv,
-				       tenxpress_get_xnp_lpa(efx));
-
-	ecmd->supported |= (SUPPORTED_100baseT_Half |
-			    SUPPORTED_100baseT_Full |
-			    SUPPORTED_1000baseT_Full);
+	if (!ecmd->autoneg)
+		return -EINVAL;
 
-	/* Use the vendor defined C22ext register for duplex settings */
-	if (ecmd->speed != SPEED_10000 && !ecmd->autoneg) {
-		reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_C22EXT,
-					 GPHY_XCONTROL_REG);
-		ecmd->duplex = (reg & (1 << GPHY_DUPLEX_LBN) ?
-				DUPLEX_FULL : DUPLEX_HALF);
-	}
+	return mdio_clause45_set_settings(efx, ecmd);
 }
 
-static int sft9001_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
+static void sfx7101_set_npage_adv(struct efx_nic *efx, u32 advertising)
 {
-	int phy_id = efx->mii.phy_id;
-	int rc;
-
-	rc = mdio_clause45_set_settings(efx, ecmd);
-	if (rc)
-		return rc;
-
-	if (ecmd->speed != SPEED_10000 && !ecmd->autoneg)
-		mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_C22EXT,
-				       GPHY_XCONTROL_REG, GPHY_DUPLEX_LBN,
-				       ecmd->duplex == DUPLEX_FULL);
-
-	return rc;
+	mdio_clause45_set_flag(efx, efx->mii.phy_id, MDIO_MMD_AN,
+			       MDIO_AN_10GBT_CTRL,
+			       MDIO_AN_10GBT_CTRL_ADV_10G_LBN,
+			       advertising & ADVERTISED_10000baseT_Full);
 }
 
-static bool sft9001_set_xnp_advertise(struct efx_nic *efx, u32 advertising)
+static void sft9001_set_npage_adv(struct efx_nic *efx, u32 advertising)
 {
-	int phy = efx->mii.phy_id;
-	int reg = mdio_clause45_read(efx, phy, MDIO_MMD_PMAPMD,
-				     PMA_PMD_SPEED_ENABLE_REG);
-	bool enabled;
-
-	reg &= ~((1 << 2) | (1 << 3));
-	if (EFX_WORKAROUND_13204(efx) &&
-	    (advertising & ADVERTISED_100baseT_Full))
-		reg |= 1 << PMA_PMD_100TX_ADV_LBN;
-	if (advertising & ADVERTISED_1000baseT_Full)
-		reg |= 1 << PMA_PMD_1000T_ADV_LBN;
-	if (advertising & ADVERTISED_10000baseT_Full)
-		reg |= 1 << PMA_PMD_10000T_ADV_LBN;
-	mdio_clause45_write(efx, phy, MDIO_MMD_PMAPMD,
-			    PMA_PMD_SPEED_ENABLE_REG, reg);
-
-	enabled = (advertising &
-		   (ADVERTISED_1000baseT_Half |
-		    ADVERTISED_1000baseT_Full |
-		    ADVERTISED_10000baseT_Full));
-	if (EFX_WORKAROUND_13204(efx))
-		enabled |= (advertising & ADVERTISED_100baseT_Full);
-	return enabled;
+	int phy_id = efx->mii.phy_id;
+
+	mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_C22EXT,
+			       C22EXT_MSTSLV_CTRL,
+			       C22EXT_MSTSLV_CTRL_ADV_1000_FD_LBN,
+			       advertising & ADVERTISED_1000baseT_Full);
+	mdio_clause45_set_flag(efx, phy_id, MDIO_MMD_AN,
+			       MDIO_AN_10GBT_CTRL,
+			       MDIO_AN_10GBT_CTRL_ADV_10G_LBN,
+			       advertising & ADVERTISED_10000baseT_Full);
 }
 
 struct efx_phy_operations falcon_sfx7101_phy_ops = {
@@ -882,8 +822,9 @@ struct efx_phy_operations falcon_sfx7101_phy_ops = {
 	.poll		  = tenxpress_phy_poll,
 	.fini		  = tenxpress_phy_fini,
 	.clear_interrupt  = efx_port_dummy_op_void,
-	.get_settings	  = sfx7101_get_settings,
-	.set_settings	  = mdio_clause45_set_settings,
+	.get_settings	  = tenxpress_get_settings,
+	.set_settings	  = tenxpress_set_settings,
+	.set_npage_adv    = sfx7101_set_npage_adv,
 	.num_tests	  = ARRAY_SIZE(sfx7101_test_names),
 	.test_names	  = sfx7101_test_names,
 	.run_tests	  = sfx7101_run_tests,
@@ -898,9 +839,9 @@ struct efx_phy_operations falcon_sft9001_phy_ops = {
 	.poll		  = tenxpress_phy_poll,
 	.fini		  = tenxpress_phy_fini,
 	.clear_interrupt  = efx_port_dummy_op_void,
-	.get_settings	  = sft9001_get_settings,
-	.set_settings	  = sft9001_set_settings,
-	.set_xnp_advertise = sft9001_set_xnp_advertise,
+	.get_settings	  = tenxpress_get_settings,
+	.set_settings	  = tenxpress_set_settings,
+	.set_npage_adv    = sft9001_set_npage_adv,
 	.num_tests	  = ARRAY_SIZE(sft9001_test_names),
 	.test_names	  = sft9001_test_names,
 	.run_tests	  = sft9001_run_tests,
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
index 82e03e1d7371..78de68f4a95b 100644
--- a/drivers/net/sfc/workarounds.h
+++ b/drivers/net/sfc/workarounds.h
@@ -18,8 +18,8 @@
 #define EFX_WORKAROUND_ALWAYS(efx) 1
 #define EFX_WORKAROUND_FALCON_A(efx) (falcon_rev(efx) <= FALCON_REV_A1)
 #define EFX_WORKAROUND_10G(efx) EFX_IS10G(efx)
-#define EFX_WORKAROUND_SFX7101(efx) ((efx)->phy_type == PHY_TYPE_SFX7101)
-#define EFX_WORKAROUND_SFT9001A(efx) ((efx)->phy_type == PHY_TYPE_SFT9001A)
+#define EFX_WORKAROUND_SFT9001(efx) ((efx)->phy_type == PHY_TYPE_SFT9001A || \
+				     (efx)->phy_type == PHY_TYPE_SFT9001B)
 
 /* XAUI resets if link not detected */
 #define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS
@@ -29,8 +29,6 @@
 #define EFX_WORKAROUND_7884 EFX_WORKAROUND_10G
 /* TX pkt parser problem with <= 16 byte TXes */
 #define EFX_WORKAROUND_9141 EFX_WORKAROUND_ALWAYS
-/* Low rate CRC errors require XAUI reset */
-#define EFX_WORKAROUND_10750 EFX_WORKAROUND_SFX7101
 /* TX_EV_PKT_ERR can be caused by a dangling TX descriptor
  * or a PCIe error (bug 11028) */
 #define EFX_WORKAROUND_10727 EFX_WORKAROUND_ALWAYS
@@ -55,8 +53,8 @@
 #define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A
 
 /* Need to send XNP pages for 100BaseT */
-#define EFX_WORKAROUND_13204 EFX_WORKAROUND_SFT9001A
-/* Need to keep AN enabled */
-#define EFX_WORKAROUND_13963 EFX_WORKAROUND_SFT9001A
+#define EFX_WORKAROUND_13204 EFX_WORKAROUND_SFT9001
+/* Don't restart AN in near-side loopback */
+#define EFX_WORKAROUND_15195 EFX_WORKAROUND_SFT9001
 
 #endif /* EFX_WORKAROUNDS_H */
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 3668e81e474d..994703cc0db3 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -1403,9 +1403,6 @@ static int sky2_up(struct net_device *dev)
 	}
 
-	if (netif_msg_ifup(sky2))
-		printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
-
 	netif_carrier_off(dev);
 
 	/* must be power of 2 */
@@ -1484,6 +1481,9 @@ static int sky2_up(struct net_device *dev)
 	sky2_write32(hw, B0_IMSK, imask);
 
 	sky2_set_multicast(dev);
+
+	if (netif_msg_ifup(sky2))
+		printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
 	return 0;
 
 err_out:
diff --git a/ipc/shm.c b/ipc/shm.c
index a9e09ad2263e..c0a021f7f41a 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -368,14 +368,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
 		file = hugetlb_file_setup(name, size);
 		shp->mlock_user = current_user();
 	} else {
-		int acctflag = VM_ACCOUNT;
+		int acctflag = 0;
 		/*
 		 * Do not allow no accounting for OVERCOMMIT_NEVER, even
 		 * if it's asked for.
 		 */
 		if ((shmflg & SHM_NORESERVE) &&
 				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
-			acctflag = 0;
+			acctflag = VM_NORESERVE;
 		file = shmem_file_setup(name, size, acctflag);
 	}
 	error = PTR_ERR(file);
diff --git a/mm/mmap.c b/mm/mmap.c
index c581df14d0de..214b6a258eeb 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1090,6 +1090,15 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
 		mapping_cap_account_dirty(vma->vm_file->f_mapping);
 }
 
+/*
+ * We account for memory if it's a private writeable mapping,
+ * and VM_NORESERVE wasn't set.
+ */
+static inline int accountable_mapping(unsigned int vm_flags)
+{
+	return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
+}
+
 unsigned long mmap_region(struct file *file, unsigned long addr,
 			  unsigned long len, unsigned long flags,
 			  unsigned int vm_flags, unsigned long pgoff,
@@ -1117,23 +1126,24 @@ munmap_back:
 	if (!may_expand_vm(mm, len >> PAGE_SHIFT))
 		return -ENOMEM;
 
-	if (flags & MAP_NORESERVE)
+	/*
+	 * Set 'VM_NORESERVE' if we should not account for the
+	 * memory use of this mapping. We only honor MAP_NORESERVE
+	 * if we're allowed to overcommit memory.
+	 */
+	if ((flags & MAP_NORESERVE) && sysctl_overcommit_memory != OVERCOMMIT_NEVER)
+		vm_flags |= VM_NORESERVE;
+	if (!accountable)
 		vm_flags |= VM_NORESERVE;
 
-	if (accountable && (!(flags & MAP_NORESERVE) ||
-			    sysctl_overcommit_memory == OVERCOMMIT_NEVER)) {
-		if (vm_flags & VM_SHARED) {
-			/* Check memory availability in shmem_file_setup? */
-			vm_flags |= VM_ACCOUNT;
-		} else if (vm_flags & VM_WRITE) {
-			/*
-			 * Private writable mapping: check memory availability
-			 */
-			charged = len >> PAGE_SHIFT;
-			if (security_vm_enough_memory(charged))
-				return -ENOMEM;
-			vm_flags |= VM_ACCOUNT;
-		}
+	/*
+	 * Private writable mapping: check memory availability
+	 */
+	if (accountable_mapping(vm_flags)) {
+		charged = len >> PAGE_SHIFT;
+		if (security_vm_enough_memory(charged))
+			return -ENOMEM;
+		vm_flags |= VM_ACCOUNT;
 	}
 
 	/*
@@ -1184,14 +1194,6 @@ munmap_back:
 		goto free_vma;
 	}
 
-	/* We set VM_ACCOUNT in a shared mapping's vm_flags, to inform
-	 * shmem_zero_setup (perhaps called through /dev/zero's ->mmap)
-	 * that memory reservation must be checked; but that reservation
-	 * belongs to shared memory object, not to vma: so now clear it.
-	 */
-	if ((vm_flags & (VM_SHARED|VM_ACCOUNT)) == (VM_SHARED|VM_ACCOUNT))
-		vma->vm_flags &= ~VM_ACCOUNT;
-
 	/* Can addr have changed??
 	 *
 	 * Answer: Yes, several device drivers can do it in their
diff --git a/mm/shmem.c b/mm/shmem.c
index 5d0de96c9789..19d566ccdeea 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2628,7 +2628,7 @@ struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
 		goto close_file;
 
 #ifdef CONFIG_SHMEM
-	SHMEM_I(inode)->flags = flags & VM_ACCOUNT;
+	SHMEM_I(inode)->flags = (flags & VM_NORESERVE) ? 0 : VM_ACCOUNT;
 #endif
 	d_instantiate(dentry, inode);
 	inode->i_size = size;
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index d19a84b79503..228be551e9c1 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -48,6 +48,7 @@
 #include <linux/pim.h>
 #include <net/addrconf.h>
 #include <linux/netfilter_ipv6.h>
+#include <net/ip6_checksum.h>
 
 /* Big lock, protecting vif table, mrt cache and mroute socket state.
    Note that the changes are semaphored via rtnl_lock.
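For illustration only (this block is not part of the patch): the accountable_mapping() helper added to mm/mmap.c above collapses the old nested VM_SHARED/VM_WRITE checks into one flag test, so a mapping is charged only when it is private, writable, and not marked VM_NORESERVE; shared mappings are left to the shmem object's own accounting (see the ipc/shm.c and mm/shmem.c hunks). A minimal user-space sketch of that rule, using stand-in flag values rather than the kernel's VM_* definitions:

#include <stdio.h>

/* Stand-in flag values for the sketch; the kernel's VM_* flags differ. */
#define VM_WRITE     0x1u
#define VM_SHARED    0x2u
#define VM_NORESERVE 0x4u

/* Same predicate as the accountable_mapping() added above: charge only
 * private, writable mappings that did not request VM_NORESERVE. */
static int accountable_mapping(unsigned int vm_flags)
{
	return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
}

int main(void)
{
	const unsigned int cases[] = {
		VM_WRITE,                /* private writable: accounted */
		VM_WRITE | VM_SHARED,    /* shared: shmem accounts for itself */
		VM_WRITE | VM_NORESERVE, /* opted out: not accounted */
		0,                       /* private read-only: not accounted */
	};
	size_t i;

	for (i = 0; i < sizeof(cases) / sizeof(cases[0]); i++)
		printf("vm_flags=0x%x accountable=%d\n",
		       cases[i], accountable_mapping(cases[i]));
	return 0;
}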
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 5f94db2f3e9e..9454d4ae46df 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -77,6 +77,7 @@
 #include <linux/poll.h>
 #include <linux/module.h>
 #include <linux/init.h>
+#include <linux/mutex.h>
 
 #ifdef CONFIG_INET
 #include <net/inet_common.h>
@@ -175,6 +176,7 @@ struct packet_sock {
 #endif
 	struct packet_type	prot_hook;
 	spinlock_t		bind_lock;
+	struct mutex		pg_vec_lock;
 	unsigned int		running:1,	/* prot_hook is attached*/
 				auxdata:1,
 				origdev:1;
@@ -1069,6 +1071,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol)
 	 */
 
 	spin_lock_init(&po->bind_lock);
+	mutex_init(&po->pg_vec_lock);
 	po->prot_hook.func = packet_rcv;
 
 	if (sock->type == SOCK_PACKET)
@@ -1865,6 +1868,7 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing
 		synchronize_net();
 
 	err = -EBUSY;
+	mutex_lock(&po->pg_vec_lock);
 	if (closing || atomic_read(&po->mapped) == 0) {
 		err = 0;
 #define XC(a, b) ({ __typeof__ ((a)) __t; __t = (a); (a) = (b); __t; })
@@ -1886,6 +1890,7 @@ static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing
 		if (atomic_read(&po->mapped))
 			printk(KERN_DEBUG "packet_mmap: vma is busy: %d\n",
 			       atomic_read(&po->mapped));
 	}
+	mutex_unlock(&po->pg_vec_lock);
 
 	spin_lock(&po->bind_lock);
 	if (was_running && !po->running) {
@@ -1918,7 +1923,7 @@ static int packet_mmap(struct file *file, struct socket *sock, struct vm_area_st
 
 	size = vma->vm_end - vma->vm_start;
 
-	lock_sock(sk);
+	mutex_lock(&po->pg_vec_lock);
 	if (po->pg_vec == NULL)
 		goto out;
 	if (size != po->pg_vec_len*po->pg_vec_pages*PAGE_SIZE)
@@ -1941,7 +1946,7 @@ static int packet_mmap(struct file *file, struct socket *sock, struct vm_area_st
 	err = 0;
 
 out:
-	release_sock(sk);
+	mutex_unlock(&po->pg_vec_lock);
 	return err;
 }
 #endif
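For illustration only (not from the patch): the af_packet.c hunks above add a dedicated pg_vec_lock mutex that both packet_set_ring() and packet_mmap() take, so the mmap path no longer needs the socket lock while it inspects the ring. A rough user-space analogue of that pattern, with pthreads standing in for the kernel mutex API and every name invented for the sketch:

#include <pthread.h>
#include <stdlib.h>

/* Toy stand-in for the state that po->pg_vec_lock guards in af_packet.c. */
struct ring {
	pthread_mutex_t pg_vec_lock;	/* cf. struct mutex pg_vec_lock */
	char **pg_vec;			/* ring pages, or NULL if no ring */
	size_t pg_vec_len;
};

/* cf. packet_set_ring(): install or tear down the ring under the mutex. */
static void ring_set(struct ring *r, char **pages, size_t len)
{
	pthread_mutex_lock(&r->pg_vec_lock);
	free(r->pg_vec);
	r->pg_vec = pages;
	r->pg_vec_len = len;
	pthread_mutex_unlock(&r->pg_vec_lock);
}

/* cf. packet_mmap(): read the ring under the same mutex rather than a
 * socket-wide lock, so mapping cannot race with ring replacement. */
static int ring_map(struct ring *r)
{
	int err = -1;

	pthread_mutex_lock(&r->pg_vec_lock);
	if (r->pg_vec != NULL)
		err = 0;	/* the real code maps the pg_vec pages here */
	pthread_mutex_unlock(&r->pg_vec_lock);
	return err;
}

int main(void)
{
	struct ring r = { PTHREAD_MUTEX_INITIALIZER, NULL, 0 };

	ring_set(&r, calloc(4, sizeof(char *)), 4);
	return ring_map(&r);
}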