55 files changed, 15886 insertions, 447 deletions
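Editorial note before the diff body: the idt77105 hunk below folds open-coded init_timer() plus a manual .function assignment into a single setup_timer() call. A minimal sketch of that pattern, with hypothetical names (my_timer, my_timer_func), using the pre-4.15 timer API that this patch targets:

#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list my_timer;            /* hypothetical timer */

static void my_timer_func(unsigned long data) /* hypothetical callback */
{
	/* ... periodic work ... */
	mod_timer(&my_timer, jiffies + HZ);   /* re-arm one second out */
}

static void my_start(void)
{
	/* Before: init_timer(&my_timer); my_timer.function = my_timer_func;
	 * After: one call wires the callback and its data argument together:
	 */
	setup_timer(&my_timer, my_timer_func, 0UL);
	my_timer.expires = jiffies + HZ;
	add_timer(&my_timer);
}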
diff --git a/MAINTAINERS b/MAINTAINERS index 5bb0b9e3059f..a7d9bfe04c6b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2442,6 +2442,17 @@ S: Maintained F: drivers/iio/light/cm* F: Documentation/devicetree/bindings/i2c/trivial-devices.txt +CAVIUM LIQUIDIO NETWORK DRIVER +M: Derek Chickles <derek.chickles@caviumnetworks.com> +M: Satanand Burla <satananda.burla@caviumnetworks.com> +M: Felix Manlunas <felix.manlunas@caviumnetworks.com> +M: Raghu Vatsavayi <raghu.vatsavayi@caviumnetworks.com> +L: netdev@vger.kernel.org +W: http://www.cavium.com +S: Supported +F: drivers/net/ethernet/cavium/ +F: drivers/net/ethernet/cavium/liquidio/ + CC2520 IEEE-802.15.4 RADIO DRIVER M: Varka Bhadram <varkabhadram@gmail.com> L: linux-wpan@vger.kernel.org
diff --git a/drivers/atm/idt77105.c b/drivers/atm/idt77105.c index 909c95bd7be2..feb023d7eebd 100644 --- a/drivers/atm/idt77105.c +++ b/drivers/atm/idt77105.c @@ -306,14 +306,12 @@ static int idt77105_start(struct atm_dev *dev) if (start_timer) { start_timer = 0; - init_timer(&stats_timer); + setup_timer(&stats_timer, idt77105_stats_timer_func, 0UL); stats_timer.expires = jiffies+IDT77105_STATS_TIMER_PERIOD; - stats_timer.function = idt77105_stats_timer_func; add_timer(&stats_timer); - init_timer(&restart_timer); + setup_timer(&restart_timer, idt77105_restart_timer_func, 0UL); restart_timer.expires = jiffies+IDT77105_RESTART_TIMER_PERIOD; - restart_timer.function = idt77105_restart_timer_func; add_timer(&restart_timer); } spin_unlock_irqrestore(&idt77105_priv_lock, flags);
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c index bf63fee4e743..c1a95a34d62e 100644 --- a/drivers/net/can/spi/mcp251x.c +++ b/drivers/net/can/spi/mcp251x.c @@ -190,10 +190,11 @@ #define RXBEID0_OFF 4 #define RXBDLC_OFF 5 #define RXBDAT_OFF 6 -#define RXFSIDH(n) ((n) * 4) -#define RXFSIDL(n) ((n) * 4 + 1) -#define RXFEID8(n) ((n) * 4 + 2) -#define RXFEID0(n) ((n) * 4 + 3) +#define RXFSID(n) (((n) < 3) ? 0 : 4) +#define RXFSIDH(n) ((n) * 4 + RXFSID(n)) +#define RXFSIDL(n) ((n) * 4 + 1 + RXFSID(n)) +#define RXFEID8(n) ((n) * 4 + 2 + RXFSID(n)) +#define RXFEID0(n) ((n) * 4 + 3 + RXFSID(n)) #define RXMSIDH(n) ((n) * 4 + 0x20) #define RXMSIDL(n) ((n) * 4 + 0x21) #define RXMEID8(n) ((n) * 4 + 0x22)
diff --git a/drivers/net/ethernet/brocade/bna/Makefile b/drivers/net/ethernet/brocade/bna/Makefile index 6e10b99733a2..8584abcf5366 100644 --- a/drivers/net/ethernet/brocade/bna/Makefile +++ b/drivers/net/ethernet/brocade/bna/Makefile @@ -9,5 +9,3 @@ obj-$(CONFIG_BNA) += bna.o bna-objs := bnad.o bnad_ethtool.o bnad_debugfs.o bna_enet.o bna_tx_rx.o bna-objs += bfa_msgq.o bfa_ioc.o bfa_ioc_ct.o bfa_cee.o bna-objs += cna_fwimg.o - -EXTRA_CFLAGS := -Idrivers/net/bna
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig index fc3d8e3ee807..c7d8674dbe75 100644 --- a/drivers/net/ethernet/cavium/Kconfig +++ b/drivers/net/ethernet/cavium/Kconfig @@ -4,37 +4,53 @@ config NET_VENDOR_CAVIUM tristate "Cavium ethernet drivers" - depends on PCI && 64BIT + depends on PCI + default y ---help--- - Enable support for the Cavium ThunderX Network Interface - Controller (NIC). The NIC provides the controller and DMA - engines to move network traffic to/from the memory. The NIC - works closely with TNS, BGX and SerDes to implement the - functions replacing and virtualizing those of a typical - standalone PCIe NIC chip. + Select this option if you want to enable Cavium network support. - If you have a Cavium Thunder board, say Y.
+ If you have a Cavium SoC or network adapter, say Y. if NET_VENDOR_CAVIUM config THUNDER_NIC_PF tristate "Thunder Physical function driver" - default NET_VENDOR_CAVIUM + depends on 64BIT + default ARCH_THUNDER select THUNDER_NIC_BGX ---help--- This driver supports Thunder's NIC physical function. + The NIC provides the controller and DMA engines to + move network traffic to/from the memory. The NIC + works closely with TNS, BGX and SerDes to implement the + functions replacing and virtualizing those of a typical + standalone PCIe NIC chip. config THUNDER_NIC_VF tristate "Thunder Virtual function driver" - default NET_VENDOR_CAVIUM + depends on 64BIT + default ARCH_THUNDER ---help--- This driver supports Thunder's NIC virtual function config THUNDER_NIC_BGX tristate "Thunder MAC interface driver (BGX)" - default NET_VENDOR_CAVIUM + depends on 64BIT + default ARCH_THUNDER ---help--- This driver supports programming and controlling of MAC interface from NIC physical function driver. +config LIQUIDIO + tristate "Cavium LiquidIO support" + select PTP_1588_CLOCK + select FW_LOADER + select LIBCRC32C + ---help--- + This driver supports Cavium LiquidIO Intelligent Server Adapters + based on CN66XX and CN68XX chips. + + To compile this driver as a module, choose M here: the module + will be called liquidio. This is recommended. + endif # NET_VENDOR_CAVIUM
diff --git a/drivers/net/ethernet/cavium/Makefile b/drivers/net/ethernet/cavium/Makefile index 7aac4780d050..d22f886ac291 100644 --- a/drivers/net/ethernet/cavium/Makefile +++ b/drivers/net/ethernet/cavium/Makefile @@ -1,5 +1,5 @@ # # Makefile for the Cavium ethernet device drivers. # - obj-$(CONFIG_NET_VENDOR_CAVIUM) += thunder/ +obj-$(CONFIG_NET_VENDOR_CAVIUM) += liquidio/
diff --git a/drivers/net/ethernet/cavium/liquidio/Makefile b/drivers/net/ethernet/cavium/liquidio/Makefile new file mode 100644 index 000000000000..2f366806835d --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/Makefile @@ -0,0 +1,16 @@ +# +# Cavium Liquidio ethernet device driver +# +obj-$(CONFIG_LIQUIDIO) += liquidio.o + +liquidio-objs := lio_main.o \ + lio_ethtool.o \ + request_manager.o \ + response_manager.o \ + octeon_device.o \ + cn66xx_device.o \ + cn68xx_device.o \ + octeon_mem_ops.o \ + octeon_droq.o \ + octeon_console.o \ + octeon_nic.o
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c new file mode 100644 index 000000000000..d23f494e2582 --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c @@ -0,0 +1,796 @@ +/********************************************************************** +* Author: Cavium, Inc. +* +* Contact: support@cavium.com +* Please include "LiquidIO" in the subject. +* +* Copyright (c) 2003-2015 Cavium, Inc. +* +* This file is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License, Version 2, as +* published by the Free Software Foundation. +* +* This file is distributed in the hope that it will be useful, but +* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty +* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or +* NONINFRINGEMENT. See the GNU General Public License for more +* details. +* +* This file may also be available under a different license from Cavium. +* Contact Cavium, Inc.
for more information +**********************************************************/ +#include <linux/version.h> +#include <linux/types.h> +#include <linux/list.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/kthread.h> +#include <linux/netdevice.h> +#include "octeon_config.h" +#include "liquidio_common.h" +#include "octeon_droq.h" +#include "octeon_iq.h" +#include "response_manager.h" +#include "octeon_device.h" +#include "octeon_nic.h" +#include "octeon_main.h" +#include "octeon_network.h" +#include "cn66xx_regs.h" +#include "cn66xx_device.h" +#include "liquidio_image.h" +#include "octeon_mem_ops.h" + +int lio_cn6xxx_soft_reset(struct octeon_device *oct) +{ + octeon_write_csr64(oct, CN6XXX_WIN_WR_MASK_REG, 0xFF); + + dev_dbg(&oct->pci_dev->dev, "BIST enabled for soft reset\n"); + + lio_pci_writeq(oct, 1, CN6XXX_CIU_SOFT_BIST); + octeon_write_csr64(oct, CN6XXX_SLI_SCRATCH1, 0x1234ULL); + + lio_pci_readq(oct, CN6XXX_CIU_SOFT_RST); + lio_pci_writeq(oct, 1, CN6XXX_CIU_SOFT_RST); + + /* make sure that the reset is written before starting the delay */ + mmiowb(); + + /* Wait for 100ms while Octeon resets. */ + mdelay(100); + + if (octeon_read_csr64(oct, CN6XXX_SLI_SCRATCH1) == 0x1234ULL) { + dev_err(&oct->pci_dev->dev, "Soft reset failed\n"); + return 1; + } + + dev_dbg(&oct->pci_dev->dev, "Reset completed\n"); + octeon_write_csr64(oct, CN6XXX_WIN_WR_MASK_REG, 0xFF); + + return 0; +} + +void lio_cn6xxx_enable_error_reporting(struct octeon_device *oct) +{ + u32 val; + + pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val); + if (val & 0x000f0000) { + dev_err(&oct->pci_dev->dev, "PCI-E Link error detected: 0x%08x\n", + val & 0x000f0000); + } + + val |= 0xf; /* Enable Link error reporting */ + + dev_dbg(&oct->pci_dev->dev, "Enabling PCI-E error reporting..\n"); + pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, val); +} + +void lio_cn6xxx_setup_pcie_mps(struct octeon_device *oct, + enum octeon_pcie_mps mps) +{ + u32 val; + u64 r64; + + /* Read config register for MPS */ + pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val); + + if (mps == PCIE_MPS_DEFAULT) { + mps = ((val & (0x7 << 5)) >> 5); + } else { + val &= ~(0x7 << 5); /* Turn off any MPS bits */ + val |= (mps << 5); /* Set MPS */ + pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, val); + } + + /* Set MPS in DPI_SLI_PRT0_CFG to the same value. */ + r64 = lio_pci_readq(oct, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port)); + r64 |= (mps << 4); + lio_pci_writeq(oct, r64, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port)); +} + +void lio_cn6xxx_setup_pcie_mrrs(struct octeon_device *oct, + enum octeon_pcie_mrrs mrrs) +{ + u32 val; + u64 r64; + + /* Read config register for MRRS */ + pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val); + + if (mrrs == PCIE_MRRS_DEFAULT) { + mrrs = ((val & (0x7 << 12)) >> 12); + } else { + val &= ~(0x7 << 12); /* Turn off any MRRS bits */ + val |= (mrrs << 12); /* Set MRRS */ + pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, val); + } + + /* Set MRRS in SLI_S2M_PORT0_CTL to the same value. */ + r64 = octeon_read_csr64(oct, CN6XXX_SLI_S2M_PORTX_CTL(oct->pcie_port)); + r64 |= mrrs; + octeon_write_csr64(oct, CN6XXX_SLI_S2M_PORTX_CTL(oct->pcie_port), r64); + + /* Set MRRS in DPI_SLI_PRT0_CFG to the same value.
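 * (Editor's aside, hedged: encoding taken from the PCIe spec and the
 * octeon_pcie_mrrs enum in cn66xx_device.h.)  The 3-bit MRRS field
 * encodes 128 << mrrs bytes, so PCIE_MRRS_512B == 2 requests
 * 128 << 2 = 512-byte reads, the value lio_cn6xxx_setup_device_regs()
 * passes for CN66XX.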
 */ + r64 = lio_pci_readq(oct, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port)); + r64 |= mrrs; + lio_pci_writeq(oct, r64, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port)); +} + +u32 lio_cn6xxx_coprocessor_clock(struct octeon_device *oct) +{ + /* Bits 29:24 of MIO_RST_BOOT hold the ref. clock multiplier + * for SLI. + */ + return ((lio_pci_readq(oct, CN6XXX_MIO_RST_BOOT) >> 24) & 0x3f) * 50; +} + +u32 lio_cn6xxx_get_oq_ticks(struct octeon_device *oct, + u32 time_intr_in_us) +{ + /* This gives the SLI clock per microsec */ + u32 oqticks_per_us = lio_cn6xxx_coprocessor_clock(oct); + + /* core clock per us / oq ticks will be fractional. To avoid that + * we use the method below. + */ + + /* This gives the clock cycles per millisecond */ + oqticks_per_us *= 1000; + + /* This gives the oq ticks (1024 core clock cycles) per millisecond */ + oqticks_per_us /= 1024; + + /* time_intr is in microseconds. The next 2 steps give the oq ticks + * corresponding to time_intr. + */ + oqticks_per_us *= time_intr_in_us; + oqticks_per_us /= 1000; + + return oqticks_per_us; +} + +void lio_cn6xxx_setup_global_input_regs(struct octeon_device *oct) +{ + /* Select Round-Robin Arb, ES, RO, NS for Input Queues */ + octeon_write_csr(oct, CN6XXX_SLI_PKT_INPUT_CONTROL, + CN6XXX_INPUT_CTL_MASK); + + /* Instruction Read Size - Max 4 instructions per PCIE Read */ + octeon_write_csr64(oct, CN6XXX_SLI_PKT_INSTR_RD_SIZE, + 0xFFFFFFFFFFFFFFFFULL); + + /* Select PCIE Port for all Input rings. */ + octeon_write_csr64(oct, CN6XXX_SLI_IN_PCIE_PORT, + (oct->pcie_port * 0x5555555555555555ULL)); +} + +static void lio_cn66xx_setup_pkt_ctl_regs(struct octeon_device *oct) +{ + u64 pktctl; + + struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip; + + pktctl = octeon_read_csr64(oct, CN6XXX_SLI_PKT_CTL); + + /* 66XX SPECIFIC */ + if (CFG_GET_OQ_MAX_Q(cn6xxx->conf) <= 4) + /* Disable RING_EN if only up to 4 rings are used. */ + pktctl &= ~(1 << 4); + else + pktctl |= (1 << 4); + + if (CFG_GET_IS_SLI_BP_ON(cn6xxx->conf)) + pktctl |= 0xF; + else + /* Disable per-port backpressure. */ + pktctl &= ~0xF; + octeon_write_csr64(oct, CN6XXX_SLI_PKT_CTL, pktctl); +} + +void lio_cn6xxx_setup_global_output_regs(struct octeon_device *oct) +{ + u32 time_threshold; + struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip; + + /* Select PCI-E Port for all Output queues */ + octeon_write_csr64(oct, CN6XXX_SLI_PKT_PCIE_PORT64, + (oct->pcie_port * 0x5555555555555555ULL)); + + if (CFG_GET_IS_SLI_BP_ON(cn6xxx->conf)) { + octeon_write_csr64(oct, CN6XXX_SLI_OQ_WMARK, 32); + } else { + /* Set Output queue watermark to 0 to disable backpressure */ + octeon_write_csr64(oct, CN6XXX_SLI_OQ_WMARK, 0); + } + + /* Select Info Ptr for length & data */ + octeon_write_csr(oct, CN6XXX_SLI_PKT_IPTR, 0xFFFFFFFF); + + /* Select Packet count instead of bytes for SLI_PKTi_CNTS[CNT] */ + octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_BMODE, 0); + + /* Select ES,RO,NS setting from register for Output Queue Packet + * Address + */ + octeon_write_csr(oct, CN6XXX_SLI_PKT_DPADDR, 0xFFFFFFFF); + + /* No Relaxed Ordering, No Snoop, 64-bit swap for Output + * Queue ScatterList + */ + octeon_write_csr(oct, CN6XXX_SLI_PKT_SLIST_ROR, 0); + octeon_write_csr(oct, CN6XXX_SLI_PKT_SLIST_NS, 0); + + /* ENDIAN_SPECIFIC CHANGES - 0 works for LE.
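 * (Editor's aside, inferred from the 2-bit-per-queue ES field.)  The
 * 0x5555555555555555 pattern below is the 2-bit code 01 replicated for
 * all 32 queues, i.e. 64-bit endian swap on every ring, while 0 leaves
 * descriptors unswapped, which is correct for little-endian hosts.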
 */ +#ifdef __BIG_ENDIAN_BITFIELD + octeon_write_csr64(oct, CN6XXX_SLI_PKT_SLIST_ES64, + 0x5555555555555555ULL); +#else + octeon_write_csr64(oct, CN6XXX_SLI_PKT_SLIST_ES64, 0ULL); +#endif + + /* No Relaxed Ordering, No Snoop, 64-bit swap for Output Queue Data */ + octeon_write_csr(oct, CN6XXX_SLI_PKT_DATA_OUT_ROR, 0); + octeon_write_csr(oct, CN6XXX_SLI_PKT_DATA_OUT_NS, 0); + octeon_write_csr64(oct, CN6XXX_SLI_PKT_DATA_OUT_ES64, + 0x5555555555555555ULL); + + /* Set up interrupt packet and time threshold */ + octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS, + (u32)CFG_GET_OQ_INTR_PKT(cn6xxx->conf)); + time_threshold = + lio_cn6xxx_get_oq_ticks(oct, (u32) + CFG_GET_OQ_INTR_TIME(cn6xxx->conf)); + + octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_TIME, time_threshold); +} + +static int lio_cn6xxx_setup_device_regs(struct octeon_device *oct) +{ + lio_cn6xxx_setup_pcie_mps(oct, PCIE_MPS_DEFAULT); + lio_cn6xxx_setup_pcie_mrrs(oct, PCIE_MRRS_512B); + lio_cn6xxx_enable_error_reporting(oct); + + lio_cn6xxx_setup_global_input_regs(oct); + lio_cn66xx_setup_pkt_ctl_regs(oct); + lio_cn6xxx_setup_global_output_regs(oct); + + /* Default error timeout value should be 0x200000 to avoid a host hang + * when an invalid register is read + */ + octeon_write_csr64(oct, CN6XXX_SLI_WINDOW_CTL, 0x200000ULL); + return 0; +} + +void lio_cn6xxx_setup_iq_regs(struct octeon_device *oct, u32 iq_no) +{ + struct octeon_instr_queue *iq = oct->instr_queue[iq_no]; + + /* Disable Packet-by-Packet mode; No Parse Mode or Skip length */ + octeon_write_csr64(oct, CN6XXX_SLI_IQ_PKT_INSTR_HDR64(iq_no), 0); + + /* Write the start of the input queue's ring and its size */ + octeon_write_csr64(oct, CN6XXX_SLI_IQ_BASE_ADDR64(iq_no), + iq->base_addr_dma); + octeon_write_csr(oct, CN6XXX_SLI_IQ_SIZE(iq_no), iq->max_count); + + /* Remember the doorbell & instruction count register addr for this + * queue + */ + iq->doorbell_reg = oct->mmio[0].hw_addr + CN6XXX_SLI_IQ_DOORBELL(iq_no); + iq->inst_cnt_reg = oct->mmio[0].hw_addr + + CN6XXX_SLI_IQ_INSTR_COUNT(iq_no); + dev_dbg(&oct->pci_dev->dev, "InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\n", + iq_no, iq->doorbell_reg, iq->inst_cnt_reg); + + /* Store the current instruction counter + * (used in flush_iq calculation) + */ + iq->reset_instr_cnt = readl(iq->inst_cnt_reg); +} + +static void lio_cn66xx_setup_iq_regs(struct octeon_device *oct, u32 iq_no) +{ + lio_cn6xxx_setup_iq_regs(oct, iq_no); + + /* Backpressure for this queue - WMARK set to all F's. This effectively + * disables the backpressure mechanism.
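 * (Editor's aside, an inference from the shifted constant below.)  The
 * WMARK field of SLI_PKT[0..31]_IN_BP appears to live in bits 63:32,
 * so (0xFFFFFFFFULL << 32) writes all-ones to WMARK and leaves the low
 * word zero.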
+ */ + octeon_write_csr64(oct, CN66XX_SLI_IQ_BP64(iq_no), + (0xFFFFFFFFULL << 32)); +} + +void lio_cn6xxx_setup_oq_regs(struct octeon_device *oct, u32 oq_no) +{ + u32 intr; + struct octeon_droq *droq = oct->droq[oq_no]; + + octeon_write_csr64(oct, CN6XXX_SLI_OQ_BASE_ADDR64(oq_no), + droq->desc_ring_dma); + octeon_write_csr(oct, CN6XXX_SLI_OQ_SIZE(oq_no), droq->max_count); + + octeon_write_csr(oct, CN6XXX_SLI_OQ_BUFF_INFO_SIZE(oq_no), + (droq->buffer_size | (OCT_RH_SIZE << 16))); + + /* Get the mapped address of the pkt_sent and pkts_credit regs */ + droq->pkts_sent_reg = + oct->mmio[0].hw_addr + CN6XXX_SLI_OQ_PKTS_SENT(oq_no); + droq->pkts_credit_reg = + oct->mmio[0].hw_addr + CN6XXX_SLI_OQ_PKTS_CREDIT(oq_no); + + /* Enable this output queue to generate Packet Timer Interrupt */ + intr = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB); + intr |= (1 << oq_no); + octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB, intr); + + /* Enable this output queue to generate Packet Count Interrupt */ + intr = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB); + intr |= (1 << oq_no); + octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB, intr); +} + +void lio_cn6xxx_enable_io_queues(struct octeon_device *oct) +{ + u32 mask; + + mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_INSTR_SIZE); + mask |= oct->io_qmask.iq64B; + octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_SIZE, mask); + + mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB); + mask |= oct->io_qmask.iq; + octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, mask); + + mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB); + mask |= oct->io_qmask.oq; + octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, mask); +} + +void lio_cn6xxx_disable_io_queues(struct octeon_device *oct) +{ + u32 mask, i, loop = HZ; + u32 d32; + + /* Reset the Enable bits for Input Queues. */ + mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB); + mask ^= oct->io_qmask.iq; + octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, mask); + + /* Wait until hardware indicates that the queues are out of reset. */ + mask = oct->io_qmask.iq; + d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_IQ); + while (((d32 & mask) != mask) && loop--) { + d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_IQ); + schedule_timeout_uninterruptible(1); + } + + /* Reset the doorbell register for each Input queue. */ + for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) { + if (!(oct->io_qmask.iq & (1UL << i))) + continue; + octeon_write_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i), 0xFFFFFFFF); + d32 = octeon_read_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i)); + } + + /* Reset the Enable bits for Output Queues. */ + mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB); + mask ^= oct->io_qmask.oq; + octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, mask); + + /* Wait until hardware indicates that the queues are out of reset. */ + loop = HZ; + mask = oct->io_qmask.oq; + d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_OQ); + while (((d32 & mask) != mask) && loop--) { + d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_OQ); + schedule_timeout_uninterruptible(1); + } + + /* Reset the doorbell register for each Output queue.
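 * (Editor's aside, an assumption about the register semantics.)  In
 * the loop below, SLI_PKTn_CNTS is read and the same value written
 * back; that read-then-write-back pattern suggests a write-one-to-clear
 * counter, so the write discards any counts accumulated while the
 * queue was enabled.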
*/ + /* for (i = 0; i < oct->num_oqs; i++) { */ + for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) { + if (!(oct->io_qmask.oq & (1UL << i))) + continue; + octeon_write_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i), 0xFFFFFFFF); + d32 = octeon_read_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i)); + + d32 = octeon_read_csr(oct, CN6XXX_SLI_OQ_PKTS_SENT(i)); + octeon_write_csr(oct, CN6XXX_SLI_OQ_PKTS_SENT(i), d32); + } + + d32 = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT); + if (d32) + octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT, d32); + + d32 = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT); + if (d32) + octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT, d32); +} + +void lio_cn6xxx_reinit_regs(struct octeon_device *oct) +{ + u32 i; + + for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) { + if (!(oct->io_qmask.iq & (1UL << i))) + continue; + oct->fn_list.setup_iq_regs(oct, i); + } + + for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) { + if (!(oct->io_qmask.oq & (1UL << i))) + continue; + oct->fn_list.setup_oq_regs(oct, i); + } + + oct->fn_list.setup_device_regs(oct); + + oct->fn_list.enable_interrupt(oct->chip); + + oct->fn_list.enable_io_queues(oct); + + /* for (i = 0; i < oct->num_oqs; i++) { */ + for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) { + if (!(oct->io_qmask.oq & (1UL << i))) + continue; + writel(oct->droq[i]->max_count, oct->droq[i]->pkts_credit_reg); + } +} + +void +lio_cn6xxx_bar1_idx_setup(struct octeon_device *oct, + u64 core_addr, + u32 idx, + int valid) +{ + u64 bar1; + + if (valid == 0) { + bar1 = lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port)); + lio_pci_writeq(oct, (bar1 & 0xFFFFFFFEULL), + CN6XXX_BAR1_REG(idx, oct->pcie_port)); + bar1 = lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port)); + return; + } + + /* Bits 17:4 of the PCI_BAR1_INDEXx stores bits 35:22 of + * the Core Addr + */ + lio_pci_writeq(oct, (((core_addr >> 22) << 4) | PCI_BAR1_MASK), + CN6XXX_BAR1_REG(idx, oct->pcie_port)); + + bar1 = lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port)); +} + +void lio_cn6xxx_bar1_idx_write(struct octeon_device *oct, + u32 idx, + u32 mask) +{ + lio_pci_writeq(oct, mask, CN6XXX_BAR1_REG(idx, oct->pcie_port)); +} + +u32 lio_cn6xxx_bar1_idx_read(struct octeon_device *oct, u32 idx) +{ + return (u32)lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port)); +} + +u32 +lio_cn6xxx_update_read_index(struct octeon_device *oct __attribute__((unused)), + struct octeon_instr_queue *iq) +{ + u32 new_idx = readl(iq->inst_cnt_reg); + + /* The new instr cnt reg is a 32-bit counter that can roll over. We have + * noted the counter's initial value at init time into + * reset_instr_cnt + */ + if (iq->reset_instr_cnt < new_idx) + new_idx -= iq->reset_instr_cnt; + else + new_idx += (0xffffffff - iq->reset_instr_cnt) + 1; + + /* Modulo of the new index with the IQ size will give us + * the new index. 
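 * (Editor's worked example.)  If reset_instr_cnt was sampled as
 * 0xFFFFFFF0 and the register now reads 0x10, the counter wrapped:
 * new_idx = 0x10 + (0xFFFFFFFF - 0xFFFFFFF0) + 1 = 0x20 instructions
 * were consumed, and the modulo below folds that count into the ring.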
+ */ + new_idx %= iq->max_count; + + return new_idx; +} + +void lio_cn6xxx_enable_interrupt(void *chip) +{ + struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)chip; + u64 mask = cn6xxx->intr_mask64 | CN6XXX_INTR_DMA0_FORCE; + + /* Enable Interrupt */ + writeq(mask, cn6xxx->intr_enb_reg64); +} + +void lio_cn6xxx_disable_interrupt(void *chip) +{ + struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)chip; + + /* Disable Interrupts */ + writeq(0, cn6xxx->intr_enb_reg64); + + /* make sure interrupts are really disabled */ + mmiowb(); +} + +void lio_cn6xxx_get_pcie_qlmport(struct octeon_device *oct) +{ + /* CN63xx Pass2 and newer parts implements the SLI_MAC_NUMBER register + * to determine the PCIE port # + */ + oct->pcie_port = octeon_read_csr(oct, CN6XXX_SLI_MAC_NUMBER) & 0xff; + + dev_dbg(&oct->pci_dev->dev, "Using PCIE Port %d\n", oct->pcie_port); +} + +void +lio_cn6xxx_process_pcie_error_intr(struct octeon_device *oct, u64 intr64) +{ + dev_err(&oct->pci_dev->dev, "Error Intr: 0x%016llx\n", + CVM_CAST64(intr64)); +} + +int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct) +{ + struct octeon_droq *droq; + u32 oq_no, pkt_count, droq_time_mask, droq_mask, droq_int_enb; + u32 droq_cnt_enb, droq_cnt_mask; + + droq_cnt_enb = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB); + droq_cnt_mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT); + droq_mask = droq_cnt_mask & droq_cnt_enb; + + droq_time_mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT); + droq_int_enb = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB); + droq_mask |= (droq_time_mask & droq_int_enb); + + droq_mask &= oct->io_qmask.oq; + + oct->droq_intr = 0; + + /* for (oq_no = 0; oq_no < oct->num_oqs; oq_no++) { */ + for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES; oq_no++) { + if (!(droq_mask & (1 << oq_no))) + continue; + + droq = oct->droq[oq_no]; + pkt_count = octeon_droq_check_hw_for_pkts(oct, droq); + if (pkt_count) { + oct->droq_intr |= (1ULL << oq_no); + if (droq->ops.poll_mode) { + u32 value; + u32 reg; + + struct octeon_cn6xxx *cn6xxx = + (struct octeon_cn6xxx *)oct->chip; + + /* disable interrupts for this droq */ + spin_lock + (&cn6xxx->lock_for_droq_int_enb_reg); + reg = CN6XXX_SLI_PKT_TIME_INT_ENB; + value = octeon_read_csr(oct, reg); + value &= ~(1 << oq_no); + octeon_write_csr(oct, reg, value); + reg = CN6XXX_SLI_PKT_CNT_INT_ENB; + value = octeon_read_csr(oct, reg); + value &= ~(1 << oq_no); + octeon_write_csr(oct, reg, value); + + /* Ensure that the enable register is written. + */ + mmiowb(); + + spin_unlock(&cn6xxx->lock_for_droq_int_enb_reg); + } + } + } + + droq_time_mask &= oct->io_qmask.oq; + droq_cnt_mask &= oct->io_qmask.oq; + + /* Reset the PKT_CNT/TIME_INT registers. */ + if (droq_time_mask) + octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT, droq_time_mask); + + if (droq_cnt_mask) /* reset PKT_CNT register:66xx */ + octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT, droq_cnt_mask); + + return 0; +} + +irqreturn_t lio_cn6xxx_process_interrupt_regs(void *dev) +{ + struct octeon_device *oct = (struct octeon_device *)dev; + struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip; + u64 intr64; + + intr64 = readq(cn6xxx->intr_sum_reg64); + + /* If our device has interrupted, then proceed. + * Also check for all f's if interrupt was triggered on an error + * and the PCI read fails. 
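 * (Editor's aside.)  A PCIe read that cannot reach the device, e.g.
 * after surprise removal or a fatal link error, completes with all
 * bits set, so 0xFFFFFFFFFFFFFFFF means "no usable status" rather
 * than 64 pending interrupt sources.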
+ */ + if (!intr64 || (intr64 == 0xFFFFFFFFFFFFFFFFULL)) + return IRQ_NONE; + + oct->int_status = 0; + + if (intr64 & CN6XXX_INTR_ERR) + lio_cn6xxx_process_pcie_error_intr(oct, intr64); + + if (intr64 & CN6XXX_INTR_PKT_DATA) { + lio_cn6xxx_process_droq_intr_regs(oct); + oct->int_status |= OCT_DEV_INTR_PKT_DATA; + } + + if (intr64 & CN6XXX_INTR_DMA0_FORCE) + oct->int_status |= OCT_DEV_INTR_DMA0_FORCE; + + if (intr64 & CN6XXX_INTR_DMA1_FORCE) + oct->int_status |= OCT_DEV_INTR_DMA1_FORCE; + + /* Clear the current interrupts */ + writeq(intr64, cn6xxx->intr_sum_reg64); + + return IRQ_HANDLED; +} + +void lio_cn6xxx_setup_reg_address(struct octeon_device *oct, + void *chip, + struct octeon_reg_list *reg_list) +{ + u8 __iomem *bar0_pciaddr = oct->mmio[0].hw_addr; + struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)chip; + + reg_list->pci_win_wr_addr_hi = + (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_ADDR_HI); + reg_list->pci_win_wr_addr_lo = + (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_ADDR_LO); + reg_list->pci_win_wr_addr = + (u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_ADDR64); + + reg_list->pci_win_rd_addr_hi = + (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_ADDR_HI); + reg_list->pci_win_rd_addr_lo = + (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_ADDR_LO); + reg_list->pci_win_rd_addr = + (u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_ADDR64); + + reg_list->pci_win_wr_data_hi = + (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_DATA_HI); + reg_list->pci_win_wr_data_lo = + (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_DATA_LO); + reg_list->pci_win_wr_data = + (u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_DATA64); + + reg_list->pci_win_rd_data_hi = + (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_DATA_HI); + reg_list->pci_win_rd_data_lo = + (u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_DATA_LO); + reg_list->pci_win_rd_data = + (u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_DATA64); + + lio_cn6xxx_get_pcie_qlmport(oct); + + cn6xxx->intr_sum_reg64 = bar0_pciaddr + CN6XXX_SLI_INT_SUM64; + cn6xxx->intr_mask64 = CN6XXX_INTR_MASK; + cn6xxx->intr_enb_reg64 = + bar0_pciaddr + CN6XXX_SLI_INT_ENB64(oct->pcie_port); +} + +int lio_setup_cn66xx_octeon_device(struct octeon_device *oct) +{ + struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip; + + if (octeon_map_pci_barx(oct, 0, 0)) + return 1; + + if (octeon_map_pci_barx(oct, 1, MAX_BAR1_IOREMAP_SIZE)) { + dev_err(&oct->pci_dev->dev, "%s CN66XX BAR1 map failed\n", + __func__); + octeon_unmap_pci_barx(oct, 0); + return 1; + } + + spin_lock_init(&cn6xxx->lock_for_droq_int_enb_reg); + + oct->fn_list.setup_iq_regs = lio_cn66xx_setup_iq_regs; + oct->fn_list.setup_oq_regs = lio_cn6xxx_setup_oq_regs; + + oct->fn_list.soft_reset = lio_cn6xxx_soft_reset; + oct->fn_list.setup_device_regs = lio_cn6xxx_setup_device_regs; + oct->fn_list.reinit_regs = lio_cn6xxx_reinit_regs; + oct->fn_list.update_iq_read_idx = lio_cn6xxx_update_read_index; + + oct->fn_list.bar1_idx_setup = lio_cn6xxx_bar1_idx_setup; + oct->fn_list.bar1_idx_write = lio_cn6xxx_bar1_idx_write; + oct->fn_list.bar1_idx_read = lio_cn6xxx_bar1_idx_read; + + oct->fn_list.process_interrupt_regs = lio_cn6xxx_process_interrupt_regs; + oct->fn_list.enable_interrupt = lio_cn6xxx_enable_interrupt; + oct->fn_list.disable_interrupt = lio_cn6xxx_disable_interrupt; + + oct->fn_list.enable_io_queues = lio_cn6xxx_enable_io_queues; + oct->fn_list.disable_io_queues = lio_cn6xxx_disable_io_queues; + + lio_cn6xxx_setup_reg_address(oct, oct->chip, &oct->reg_list); + + cn6xxx->conf = (struct octeon_config *) + 
oct_get_config_info(oct, LIO_210SV); + if (!cn6xxx->conf) { + dev_err(&oct->pci_dev->dev, "%s No Config found for CN66XX\n", + __func__); + octeon_unmap_pci_barx(oct, 0); + octeon_unmap_pci_barx(oct, 1); + return 1; + } + + oct->coproc_clock_rate = 1000000ULL * lio_cn6xxx_coprocessor_clock(oct); + + return 0; +} + +int lio_validate_cn6xxx_config_info(struct octeon_device *oct, + struct octeon_config *conf6xxx) +{ + /* int total_instrs = 0; */ + + if (CFG_GET_IQ_MAX_Q(conf6xxx) > CN6XXX_MAX_INPUT_QUEUES) { + dev_err(&oct->pci_dev->dev, "%s: Num IQ (%d) exceeds Max (%d)\n", + __func__, CFG_GET_IQ_MAX_Q(conf6xxx), + CN6XXX_MAX_INPUT_QUEUES); + return 1; + } + + if (CFG_GET_OQ_MAX_Q(conf6xxx) > CN6XXX_MAX_OUTPUT_QUEUES) { + dev_err(&oct->pci_dev->dev, "%s: Num OQ (%d) exceeds Max (%d)\n", + __func__, CFG_GET_OQ_MAX_Q(conf6xxx), + CN6XXX_MAX_OUTPUT_QUEUES); + return 1; + } + + if (CFG_GET_IQ_INSTR_TYPE(conf6xxx) != OCTEON_32BYTE_INSTR && + CFG_GET_IQ_INSTR_TYPE(conf6xxx) != OCTEON_64BYTE_INSTR) { + dev_err(&oct->pci_dev->dev, "%s: Invalid instr type for IQ\n", + __func__); + return 1; + } + if (!(CFG_GET_OQ_INFO_PTR(conf6xxx)) || + !(CFG_GET_OQ_REFILL_THRESHOLD(conf6xxx))) { + dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n", + __func__); + return 1; + } + + if (!(CFG_GET_OQ_INTR_TIME(conf6xxx))) { + dev_err(&oct->pci_dev->dev, "%s: No Time Interrupt for OQ\n", + __func__); + return 1; + } + + return 0; +} diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h new file mode 100644 index 000000000000..f77918779355 --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h @@ -0,0 +1,107 @@ +/********************************************************************** +* Author: Cavium, Inc. +* +* Contact: support@cavium.com +* Please include "LiquidIO" in the subject. +* +* Copyright (c) 2003-2015 Cavium, Inc. +* +* This file is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License, Version 2, as +* published by the Free Software Foundation. +* +* This file is distributed in the hope that it will be useful, but +* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty +* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or +* NONINFRINGEMENT. See the GNU General Public License for more +* details. +* +* This file may also be available under a different license from Cavium. +* Contact Cavium, Inc. for more information +**********************************************************************/ + +/*! \file cn66xx_device.h + * \brief Host Driver: Routines that perform CN66XX specific operations. + */ + +#ifndef __CN66XX_DEVICE_H__ +#define __CN66XX_DEVICE_H__ + +/* Register address and configuration for a CN6XXX devices. 
+ * If device specific changes need to be made then add a struct to include + * device specific fields as shown in the commented section + */ +struct octeon_cn6xxx { + /** PCI interrupt summary register */ + u8 __iomem *intr_sum_reg64; + + /** PCI interrupt enable register */ + u8 __iomem *intr_enb_reg64; + + /** The PCI interrupt mask used by interrupt handler */ + u64 intr_mask64; + + struct octeon_config *conf; + + /* Example additional fields - not used currently + * struct { + * }cn6xyz; + */ + + /* For the purpose of atomic access to interrupt enable reg */ + spinlock_t lock_for_droq_int_enb_reg; + +}; + +enum octeon_pcie_mps { + PCIE_MPS_DEFAULT = -1, /* Use the default setup by BIOS */ + PCIE_MPS_128B = 0, + PCIE_MPS_256B = 1 +}; + +enum octeon_pcie_mrrs { + PCIE_MRRS_DEFAULT = -1, /* Use the default setup by BIOS */ + PCIE_MRRS_128B = 0, + PCIE_MRRS_256B = 1, + PCIE_MRRS_512B = 2, + PCIE_MRRS_1024B = 3, + PCIE_MRRS_2048B = 4, + PCIE_MRRS_4096B = 5 +}; + +/* Common functions for 66xx and 68xx */ +int lio_cn6xxx_soft_reset(struct octeon_device *oct); +void lio_cn6xxx_enable_error_reporting(struct octeon_device *oct); +void lio_cn6xxx_setup_pcie_mps(struct octeon_device *oct, + enum octeon_pcie_mps mps); +void lio_cn6xxx_setup_pcie_mrrs(struct octeon_device *oct, + enum octeon_pcie_mrrs mrrs); +void lio_cn6xxx_setup_global_input_regs(struct octeon_device *oct); +void lio_cn6xxx_setup_global_output_regs(struct octeon_device *oct); +void lio_cn6xxx_setup_iq_regs(struct octeon_device *oct, u32 iq_no); +void lio_cn6xxx_setup_oq_regs(struct octeon_device *oct, u32 oq_no); +void lio_cn6xxx_enable_io_queues(struct octeon_device *oct); +void lio_cn6xxx_disable_io_queues(struct octeon_device *oct); +void lio_cn6xxx_process_pcie_error_intr(struct octeon_device *oct, u64 intr64); +int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct); +irqreturn_t lio_cn6xxx_process_interrupt_regs(void *dev); +void lio_cn6xxx_reinit_regs(struct octeon_device *oct); +void lio_cn6xxx_bar1_idx_setup(struct octeon_device *oct, u64 core_addr, + u32 idx, int valid); +void lio_cn6xxx_bar1_idx_write(struct octeon_device *oct, u32 idx, u32 mask); +u32 lio_cn6xxx_bar1_idx_read(struct octeon_device *oct, u32 idx); +u32 +lio_cn6xxx_update_read_index(struct octeon_device *oct __attribute__((unused)), + struct octeon_instr_queue *iq); +void lio_cn6xxx_enable_interrupt(void *chip); +void lio_cn6xxx_disable_interrupt(void *chip); +void cn6xxx_get_pcie_qlmport(struct octeon_device *oct); +void lio_cn6xxx_setup_reg_address(struct octeon_device *oct, void *chip, + struct octeon_reg_list *reg_list); +u32 lio_cn6xxx_coprocessor_clock(struct octeon_device *oct); +u32 lio_cn6xxx_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us); +int lio_setup_cn66xx_octeon_device(struct octeon_device *); +int lio_validate_cn6xxx_config_info(struct octeon_device *oct, + struct octeon_config *); + +#endif diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h b/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h new file mode 100644 index 000000000000..5e3aff242ad3 --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_regs.h @@ -0,0 +1,535 @@ +/********************************************************************** +* Author: Cavium, Inc. +* +* Contact: support@cavium.com +* Please include "LiquidIO" in the subject. +* +* Copyright (c) 2003-2015 Cavium, Inc. 
+* +* This file is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License, Version 2, as +* published by the Free Software Foundation. +* +* This file is distributed in the hope that it will be useful, but +* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty +* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or +* NONINFRINGEMENT. See the GNU General Public License for more +* details. +* +* This file may also be available under a different license from Cavium. +* Contact Cavium, Inc. for more information +**********************************************************************/ + +/*! \file cn66xx_regs.h + * \brief Host Driver: Register Address and Register Mask values for + * Octeon CN66XX devices. + */ + +#ifndef __CN66XX_REGS_H__ +#define __CN66XX_REGS_H__ + +#define CN6XXX_XPANSION_BAR 0x30 + +#define CN6XXX_MSI_CAP 0x50 +#define CN6XXX_MSI_ADDR_LO 0x54 +#define CN6XXX_MSI_ADDR_HI 0x58 +#define CN6XXX_MSI_DATA 0x5C + +#define CN6XXX_PCIE_CAP 0x70 +#define CN6XXX_PCIE_DEVCAP 0x74 +#define CN6XXX_PCIE_DEVCTL 0x78 +#define CN6XXX_PCIE_LINKCAP 0x7C +#define CN6XXX_PCIE_LINKCTL 0x80 +#define CN6XXX_PCIE_SLOTCAP 0x84 +#define CN6XXX_PCIE_SLOTCTL 0x88 + +#define CN6XXX_PCIE_ENH_CAP 0x100 +#define CN6XXX_PCIE_UNCORR_ERR_STATUS 0x104 +#define CN6XXX_PCIE_UNCORR_ERR_MASK 0x108 +#define CN6XXX_PCIE_UNCORR_ERR 0x10C +#define CN6XXX_PCIE_CORR_ERR_STATUS 0x110 +#define CN6XXX_PCIE_CORR_ERR_MASK 0x114 +#define CN6XXX_PCIE_ADV_ERR_CAP 0x118 + +#define CN6XXX_PCIE_ACK_REPLAY_TIMER 0x700 +#define CN6XXX_PCIE_OTHER_MSG 0x704 +#define CN6XXX_PCIE_PORT_FORCE_LINK 0x708 +#define CN6XXX_PCIE_ACK_FREQ 0x70C +#define CN6XXX_PCIE_PORT_LINK_CTL 0x710 +#define CN6XXX_PCIE_LANE_SKEW 0x714 +#define CN6XXX_PCIE_SYM_NUM 0x718 +#define CN6XXX_PCIE_FLTMSK 0x720 + +/* ############## BAR0 Registers ################ */ + +#define CN6XXX_SLI_CTL_PORT0 0x0050 +#define CN6XXX_SLI_CTL_PORT1 0x0060 + +#define CN6XXX_SLI_WINDOW_CTL 0x02E0 +#define CN6XXX_SLI_DBG_DATA 0x0310 +#define CN6XXX_SLI_SCRATCH1 0x03C0 +#define CN6XXX_SLI_SCRATCH2 0x03D0 +#define CN6XXX_SLI_CTL_STATUS 0x0570 + +#define CN6XXX_WIN_WR_ADDR_LO 0x0000 +#define CN6XXX_WIN_WR_ADDR_HI 0x0004 +#define CN6XXX_WIN_WR_ADDR64 CN6XXX_WIN_WR_ADDR_LO + +#define CN6XXX_WIN_RD_ADDR_LO 0x0010 +#define CN6XXX_WIN_RD_ADDR_HI 0x0014 +#define CN6XXX_WIN_RD_ADDR64 CN6XXX_WIN_RD_ADDR_LO + +#define CN6XXX_WIN_WR_DATA_LO 0x0020 +#define CN6XXX_WIN_WR_DATA_HI 0x0024 +#define CN6XXX_WIN_WR_DATA64 CN6XXX_WIN_WR_DATA_LO + +#define CN6XXX_WIN_RD_DATA_LO 0x0040 +#define CN6XXX_WIN_RD_DATA_HI 0x0044 +#define CN6XXX_WIN_RD_DATA64 CN6XXX_WIN_RD_DATA_LO + +#define CN6XXX_WIN_WR_MASK_LO 0x0030 +#define CN6XXX_WIN_WR_MASK_HI 0x0034 +#define CN6XXX_WIN_WR_MASK_REG CN6XXX_WIN_WR_MASK_LO + +/* 1 register (32-bit) to enable Input queues */ +#define CN6XXX_SLI_PKT_INSTR_ENB 0x1000 + +/* 1 register (32-bit) to enable Output queues */ +#define CN6XXX_SLI_PKT_OUT_ENB 0x1010 + +/* 1 register (32-bit) to determine whether Output queues are in reset. */ +#define CN6XXX_SLI_PORT_IN_RST_OQ 0x11F0 + +/* 1 register (32-bit) to determine whether Input queues are in reset. */ +#define CN6XXX_SLI_PORT_IN_RST_IQ 0x11F4 + +/*###################### REQUEST QUEUE #########################*/ + +/* 1 register (32-bit) - instr. size of each input queue. 
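 * (Editor's worked example for the 32-entry banks that follow.)  Each
 * per-queue register below strides by CN6XXX_IQ_OFFSET (0x10); for
 * instance CN6XXX_SLI_IQ_BASE_ADDR64(5) = 0x2800 + 5 * 0x10 = 0x2850.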
*/ +#define CN6XXX_SLI_PKT_INSTR_SIZE 0x1020 + +/* 32 registers for Input Queue Instr Count - SLI_PKT_IN_DONE0_CNTS */ +#define CN6XXX_SLI_IQ_INSTR_COUNT_START 0x2000 + +/* 32 registers for Input Queue Start Addr - SLI_PKT0_INSTR_BADDR */ +#define CN6XXX_SLI_IQ_BASE_ADDR_START64 0x2800 + +/* 32 registers for Input Doorbell - SLI_PKT0_INSTR_BAOFF_DBELL */ +#define CN6XXX_SLI_IQ_DOORBELL_START 0x2C00 + +/* 32 registers for Input Queue size - SLI_PKT0_INSTR_FIFO_RSIZE */ +#define CN6XXX_SLI_IQ_SIZE_START 0x3000 + +/* 32 registers for Instruction Header Options - SLI_PKT0_INSTR_HEADER */ +#define CN6XXX_SLI_IQ_PKT_INSTR_HDR_START64 0x3400 + +/* 1 register (64-bit) - Back Pressure for each input queue - SLI_PKT0_IN_BP */ +#define CN66XX_SLI_INPUT_BP_START64 0x3800 + +/* Each Input Queue register is at a 16-byte Offset in BAR0 */ +#define CN6XXX_IQ_OFFSET 0x10 + +/* 1 register (32-bit) - ES, RO, NS, Arbitration for Input Queue Data & + * gather list fetches. SLI_PKT_INPUT_CONTROL. + */ +#define CN6XXX_SLI_PKT_INPUT_CONTROL 0x1170 + +/* 1 register (64-bit) - Number of instructions to read at one time + * - 2 bits for each input ring. SLI_PKT_INSTR_RD_SIZE. + */ +#define CN6XXX_SLI_PKT_INSTR_RD_SIZE 0x11A0 + +/* 1 register (64-bit) - Assign Input ring to MAC port + * - 2 bits for each input ring. SLI_PKT_IN_PCIE_PORT. + */ +#define CN6XXX_SLI_IN_PCIE_PORT 0x11B0 + +/*------- Request Queue Macros ---------*/ +#define CN6XXX_SLI_IQ_BASE_ADDR64(iq) \ + (CN6XXX_SLI_IQ_BASE_ADDR_START64 + ((iq) * CN6XXX_IQ_OFFSET)) + +#define CN6XXX_SLI_IQ_SIZE(iq) \ + (CN6XXX_SLI_IQ_SIZE_START + ((iq) * CN6XXX_IQ_OFFSET)) + +#define CN6XXX_SLI_IQ_PKT_INSTR_HDR64(iq) \ + (CN6XXX_SLI_IQ_PKT_INSTR_HDR_START64 + ((iq) * CN6XXX_IQ_OFFSET)) + +#define CN6XXX_SLI_IQ_DOORBELL(iq) \ + (CN6XXX_SLI_IQ_DOORBELL_START + ((iq) * CN6XXX_IQ_OFFSET)) + +#define CN6XXX_SLI_IQ_INSTR_COUNT(iq) \ + (CN6XXX_SLI_IQ_INSTR_COUNT_START + ((iq) * CN6XXX_IQ_OFFSET)) + +#define CN66XX_SLI_IQ_BP64(iq) \ + (CN66XX_SLI_INPUT_BP_START64 + ((iq) * CN6XXX_IQ_OFFSET)) + +/*------------------ Masks ----------------*/ +#define CN6XXX_INPUT_CTL_ROUND_ROBIN_ARB BIT(22) +#define CN6XXX_INPUT_CTL_DATA_NS BIT(8) +#define CN6XXX_INPUT_CTL_DATA_ES_64B_SWAP BIT(6) +#define CN6XXX_INPUT_CTL_DATA_RO BIT(5) +#define CN6XXX_INPUT_CTL_USE_CSR BIT(4) +#define CN6XXX_INPUT_CTL_GATHER_NS BIT(3) +#define CN6XXX_INPUT_CTL_GATHER_ES_64B_SWAP BIT(2) +#define CN6XXX_INPUT_CTL_GATHER_RO BIT(1) + +#ifdef __BIG_ENDIAN_BITFIELD +#define CN6XXX_INPUT_CTL_MASK \ + (CN6XXX_INPUT_CTL_DATA_ES_64B_SWAP \ + | CN6XXX_INPUT_CTL_USE_CSR \ + | CN6XXX_INPUT_CTL_GATHER_ES_64B_SWAP) +#else +#define CN6XXX_INPUT_CTL_MASK \ + (CN6XXX_INPUT_CTL_DATA_ES_64B_SWAP \ + | CN6XXX_INPUT_CTL_USE_CSR) +#endif + +/*############################ OUTPUT QUEUE #########################*/ + +/* 32 registers for Output queue buffer and info size - SLI_PKT0_OUT_SIZE */ +#define CN6XXX_SLI_OQ0_BUFF_INFO_SIZE 0x0C00 + +/* 32 registers for Output Queue Start Addr - SLI_PKT0_SLIST_BADDR */ +#define CN6XXX_SLI_OQ_BASE_ADDR_START64 0x1400 + +/* 32 registers for Output Queue Packet Credits - SLI_PKT0_SLIST_BAOFF_DBELL */ +#define CN6XXX_SLI_OQ_PKT_CREDITS_START 0x1800 + +/* 32 registers for Output Queue size - SLI_PKT0_SLIST_FIFO_RSIZE */ +#define CN6XXX_SLI_OQ_SIZE_START 0x1C00 + +/* 32 registers for Output Queue Packet Count - SLI_PKT0_CNTS */ +#define CN6XXX_SLI_OQ_PKT_SENT_START 0x2400 + +/* Each Output Queue register is at a 16-byte Offset in BAR0 */ +#define CN6XXX_OQ_OFFSET 0x10 + +/* 1 register (32-bit) - 1 bit 
for each output queue + * - Relaxed Ordering setting for reading Output Queues descriptors + * - SLI_PKT_SLIST_ROR + */ +#define CN6XXX_SLI_PKT_SLIST_ROR 0x1030 + +/* 1 register (32-bit) - 1 bit for each output queue + * - No Snoop mode for reading Output Queues descriptors + * - SLI_PKT_SLIST_NS + */ +#define CN6XXX_SLI_PKT_SLIST_NS 0x1040 + +/* 1 register (64-bit) - 2 bits for each output queue + * - Endian-Swap mode for reading Output Queue descriptors + * - SLI_PKT_SLIST_ES + */ +#define CN6XXX_SLI_PKT_SLIST_ES64 0x1050 + +/* 1 register (32-bit) - 1 bit for each output queue + * - InfoPtr mode for Output Queues. + * - SLI_PKT_IPTR + */ +#define CN6XXX_SLI_PKT_IPTR 0x1070 + +/* 1 register (32-bit) - 1 bit for each output queue + * - DPTR format selector for Output queues. + * - SLI_PKT_DPADDR + */ +#define CN6XXX_SLI_PKT_DPADDR 0x1080 + +/* 1 register (32-bit) - 1 bit for each output queue + * - Relaxed Ordering setting for reading Output Queues data + * - SLI_PKT_DATA_OUT_ROR + */ +#define CN6XXX_SLI_PKT_DATA_OUT_ROR 0x1090 + +/* 1 register (32-bit) - 1 bit for each output queue + * - No Snoop mode for reading Output Queues data + * - SLI_PKT_DATA_OUT_NS + */ +#define CN6XXX_SLI_PKT_DATA_OUT_NS 0x10A0 + +/* 1 register (64-bit) - 2 bits for each output queue + * - Endian-Swap mode for reading Output Queue data + * - SLI_PKT_DATA_OUT_ES + */ +#define CN6XXX_SLI_PKT_DATA_OUT_ES64 0x10B0 + +/* 1 register (32-bit) - 1 bit for each output queue + * - Controls whether SLI_PKTn_CNTS is incremented for bytes or for packets. + * - SLI_PKT_OUT_BMODE + */ +#define CN6XXX_SLI_PKT_OUT_BMODE 0x10D0 + +/* 1 register (64-bit) - 2 bits for each output queue + * - Assign PCIE port for Output queues + * - SLI_PKT_PCIE_PORT. + */ +#define CN6XXX_SLI_PKT_PCIE_PORT64 0x10E0 + +/* 1 (64-bit) register for Output Queue Packet Count Interrupt Threshold + * & Time Threshold. The same setting applies to all 32 queues. + * The register is defined as a 64-bit registers, but we use the + * 32-bit offsets to define distinct addresses. + */ +#define CN6XXX_SLI_OQ_INT_LEVEL_PKTS 0x1120 +#define CN6XXX_SLI_OQ_INT_LEVEL_TIME 0x1124 + +/* 1 (64-bit register) for Output Queue backpressure across all rings. */ +#define CN6XXX_SLI_OQ_WMARK 0x1180 + +/* 1 register to control output queue global backpressure & ring enable. */ +#define CN6XXX_SLI_PKT_CTL 0x1220 + +/*------- Output Queue Macros ---------*/ +#define CN6XXX_SLI_OQ_BASE_ADDR64(oq) \ + (CN6XXX_SLI_OQ_BASE_ADDR_START64 + ((oq) * CN6XXX_OQ_OFFSET)) + +#define CN6XXX_SLI_OQ_SIZE(oq) \ + (CN6XXX_SLI_OQ_SIZE_START + ((oq) * CN6XXX_OQ_OFFSET)) + +#define CN6XXX_SLI_OQ_BUFF_INFO_SIZE(oq) \ + (CN6XXX_SLI_OQ0_BUFF_INFO_SIZE + ((oq) * CN6XXX_OQ_OFFSET)) + +#define CN6XXX_SLI_OQ_PKTS_SENT(oq) \ + (CN6XXX_SLI_OQ_PKT_SENT_START + ((oq) * CN6XXX_OQ_OFFSET)) + +#define CN6XXX_SLI_OQ_PKTS_CREDIT(oq) \ + (CN6XXX_SLI_OQ_PKT_CREDITS_START + ((oq) * CN6XXX_OQ_OFFSET)) + +/*######################### DMA Counters #########################*/ + +/* 2 registers (64-bit) - DMA Count - 1 for each DMA counter 0/1. 
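 * (Editor's aside, inferred from the +4 offset in the macros below.)
 * Each 64-bit SLI_DMA_n_INT_LEVEL register packs both thresholds, and
 * the driver addresses them as 32-bit halves: the packet count via
 * CN6XXX_DMA_PKT_INT_LEVEL(dq) at +0 and the time threshold via
 * CN6XXX_DMA_TIME_INT_LEVEL(dq) at +4.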
*/ +#define CN6XXX_DMA_CNT_START 0x0400 + +/* 2 registers (64-bit) - DMA Timer 0/1, contains DMA timer values + * SLI_DMA_0_TIM + */ +#define CN6XXX_DMA_TIM_START 0x0420 + +/* 2 registers (64-bit) - DMA count & Time Interrupt threshold - + * SLI_DMA_0_INT_LEVEL + */ +#define CN6XXX_DMA_INT_LEVEL_START 0x03E0 + +/* Each DMA register is at a 16-byte Offset in BAR0 */ +#define CN6XXX_DMA_OFFSET 0x10 + +/*---------- DMA Counter Macros ---------*/ +#define CN6XXX_DMA_CNT(dq) \ + (CN6XXX_DMA_CNT_START + ((dq) * CN6XXX_DMA_OFFSET)) + +#define CN6XXX_DMA_INT_LEVEL(dq) \ + (CN6XXX_DMA_INT_LEVEL_START + ((dq) * CN6XXX_DMA_OFFSET)) + +#define CN6XXX_DMA_PKT_INT_LEVEL(dq) \ + (CN6XXX_DMA_INT_LEVEL_START + ((dq) * CN6XXX_DMA_OFFSET)) + +#define CN6XXX_DMA_TIME_INT_LEVEL(dq) \ + (CN6XXX_DMA_INT_LEVEL_START + 4 + ((dq) * CN6XXX_DMA_OFFSET)) + +#define CN6XXX_DMA_TIM(dq) \ + (CN6XXX_DMA_TIM_START + ((dq) * CN6XXX_DMA_OFFSET)) + +/*######################## INTERRUPTS #########################*/ + +/* 1 register (64-bit) for Interrupt Summary */ +#define CN6XXX_SLI_INT_SUM64 0x0330 + +/* 1 register (64-bit) for Interrupt Enable */ +#define CN6XXX_SLI_INT_ENB64_PORT0 0x0340 +#define CN6XXX_SLI_INT_ENB64_PORT1 0x0350 + +/* 1 register (32-bit) to enable Output Queue Packet/Byte Count Interrupt */ +#define CN6XXX_SLI_PKT_CNT_INT_ENB 0x1150 + +/* 1 register (32-bit) to enable Output Queue Packet Timer Interrupt */ +#define CN6XXX_SLI_PKT_TIME_INT_ENB 0x1160 + +/* 1 register (32-bit) to indicate which Output Queue reached pkt threshold */ +#define CN6XXX_SLI_PKT_CNT_INT 0x1130 + +/* 1 register (32-bit) to indicate which Output Queue reached time threshold */ +#define CN6XXX_SLI_PKT_TIME_INT 0x1140 + +/*------------------ Interrupt Masks ----------------*/ + +#define CN6XXX_INTR_RML_TIMEOUT_ERR BIT(1) +#define CN6XXX_INTR_BAR0_RW_TIMEOUT_ERR BIT(2) +#define CN6XXX_INTR_IO2BIG_ERR BIT(3) +#define CN6XXX_INTR_PKT_COUNT BIT(4) +#define CN6XXX_INTR_PKT_TIME BIT(5) +#define CN6XXX_INTR_M0UPB0_ERR BIT(8) +#define CN6XXX_INTR_M0UPWI_ERR BIT(9) +#define CN6XXX_INTR_M0UNB0_ERR BIT(10) +#define CN6XXX_INTR_M0UNWI_ERR BIT(11) +#define CN6XXX_INTR_M1UPB0_ERR BIT(12) +#define CN6XXX_INTR_M1UPWI_ERR BIT(13) +#define CN6XXX_INTR_M1UNB0_ERR BIT(14) +#define CN6XXX_INTR_M1UNWI_ERR BIT(15) +#define CN6XXX_INTR_MIO_INT0 BIT(16) +#define CN6XXX_INTR_MIO_INT1 BIT(17) +#define CN6XXX_INTR_MAC_INT0 BIT(18) +#define CN6XXX_INTR_MAC_INT1 BIT(19) + +#define CN6XXX_INTR_DMA0_FORCE BIT_ULL(32) +#define CN6XXX_INTR_DMA1_FORCE BIT_ULL(33) +#define CN6XXX_INTR_DMA0_COUNT BIT_ULL(34) +#define CN6XXX_INTR_DMA1_COUNT BIT_ULL(35) +#define CN6XXX_INTR_DMA0_TIME BIT_ULL(36) +#define CN6XXX_INTR_DMA1_TIME BIT_ULL(37) +#define CN6XXX_INTR_INSTR_DB_OF_ERR BIT_ULL(48) +#define CN6XXX_INTR_SLIST_DB_OF_ERR BIT_ULL(49) +#define CN6XXX_INTR_POUT_ERR BIT_ULL(50) +#define CN6XXX_INTR_PIN_BP_ERR BIT_ULL(51) +#define CN6XXX_INTR_PGL_ERR BIT_ULL(52) +#define CN6XXX_INTR_PDI_ERR BIT_ULL(53) +#define CN6XXX_INTR_POP_ERR BIT_ULL(54) +#define CN6XXX_INTR_PINS_ERR BIT_ULL(55) +#define CN6XXX_INTR_SPRT0_ERR BIT_ULL(56) +#define CN6XXX_INTR_SPRT1_ERR BIT_ULL(57) +#define CN6XXX_INTR_ILL_PAD_ERR BIT_ULL(60) + +#define CN6XXX_INTR_DMA0_DATA (CN6XXX_INTR_DMA0_TIME) + +#define CN6XXX_INTR_DMA1_DATA (CN6XXX_INTR_DMA1_TIME) + +#define CN6XXX_INTR_DMA_DATA \ + (CN6XXX_INTR_DMA0_DATA | CN6XXX_INTR_DMA1_DATA) + +#define CN6XXX_INTR_PKT_DATA (CN6XXX_INTR_PKT_TIME | \ + CN6XXX_INTR_PKT_COUNT) + +/* Sum of interrupts for all PCI-Express Data Interrupts */ +#define 
CN6XXX_INTR_PCIE_DATA \ + (CN6XXX_INTR_DMA_DATA | CN6XXX_INTR_PKT_DATA) + +#define CN6XXX_INTR_MIO \ + (CN6XXX_INTR_MIO_INT0 | CN6XXX_INTR_MIO_INT1) + +#define CN6XXX_INTR_MAC \ + (CN6XXX_INTR_MAC_INT0 | CN6XXX_INTR_MAC_INT1) + +/* Sum of interrupts for error events */ +#define CN6XXX_INTR_ERR \ + (CN6XXX_INTR_BAR0_RW_TIMEOUT_ERR \ + | CN6XXX_INTR_IO2BIG_ERR \ + | CN6XXX_INTR_M0UPB0_ERR \ + | CN6XXX_INTR_M0UPWI_ERR \ + | CN6XXX_INTR_M0UNB0_ERR \ + | CN6XXX_INTR_M0UNWI_ERR \ + | CN6XXX_INTR_M1UPB0_ERR \ + | CN6XXX_INTR_M1UPWI_ERR \ + | CN6XXX_INTR_M1UPB0_ERR \ + | CN6XXX_INTR_M1UNWI_ERR \ + | CN6XXX_INTR_INSTR_DB_OF_ERR \ + | CN6XXX_INTR_SLIST_DB_OF_ERR \ + | CN6XXX_INTR_POUT_ERR \ + | CN6XXX_INTR_PIN_BP_ERR \ + | CN6XXX_INTR_PGL_ERR \ + | CN6XXX_INTR_PDI_ERR \ + | CN6XXX_INTR_POP_ERR \ + | CN6XXX_INTR_PINS_ERR \ + | CN6XXX_INTR_SPRT0_ERR \ + | CN6XXX_INTR_SPRT1_ERR \ + | CN6XXX_INTR_ILL_PAD_ERR) + +/* Programmed Mask for Interrupt Sum */ +#define CN6XXX_INTR_MASK \ + (CN6XXX_INTR_PCIE_DATA \ + | CN6XXX_INTR_DMA0_FORCE \ + | CN6XXX_INTR_DMA1_FORCE \ + | CN6XXX_INTR_MIO \ + | CN6XXX_INTR_MAC \ + | CN6XXX_INTR_ERR) + +#define CN6XXX_SLI_S2M_PORT0_CTL 0x3D80 +#define CN6XXX_SLI_S2M_PORT1_CTL 0x3D90 +#define CN6XXX_SLI_S2M_PORTX_CTL(port) \ + (CN6XXX_SLI_S2M_PORT0_CTL + (port * 0x10)) + +#define CN6XXX_SLI_INT_ENB64(port) \ + (CN6XXX_SLI_INT_ENB64_PORT0 + (port * 0x10)) + +#define CN6XXX_SLI_MAC_NUMBER 0x3E00 + +/* CN6XXX BAR1 Index registers. */ +#define CN6XXX_PEM_BAR1_INDEX000 0x00011800C00000A8ULL +#define CN6XXX_PEM_OFFSET 0x0000000001000000ULL + +#define CN6XXX_BAR1_INDEX_START CN6XXX_PEM_BAR1_INDEX000 +#define CN6XXX_PCI_BAR1_OFFSET 0x8 + +#define CN6XXX_BAR1_REG(idx, port) \ + (CN6XXX_BAR1_INDEX_START + (port * CN6XXX_PEM_OFFSET) + \ + (CN6XXX_PCI_BAR1_OFFSET * (idx))) + +/*############################ DPI #########################*/ + +#define CN6XXX_DPI_CTL 0x0001df0000000040ULL + +#define CN6XXX_DPI_DMA_CONTROL 0x0001df0000000048ULL + +#define CN6XXX_DPI_REQ_GBL_ENB 0x0001df0000000050ULL + +#define CN6XXX_DPI_REQ_ERR_RSP 0x0001df0000000058ULL + +#define CN6XXX_DPI_REQ_ERR_RST 0x0001df0000000060ULL + +#define CN6XXX_DPI_DMA_ENG0_ENB 0x0001df0000000080ULL + +#define CN6XXX_DPI_DMA_ENG_ENB(q_no) \ + (CN6XXX_DPI_DMA_ENG0_ENB + (q_no * 8)) + +#define CN6XXX_DPI_DMA_ENG0_BUF 0x0001df0000000880ULL + +#define CN6XXX_DPI_DMA_ENG_BUF(q_no) \ + (CN6XXX_DPI_DMA_ENG0_BUF + (q_no * 8)) + +#define CN6XXX_DPI_SLI_PRT0_CFG 0x0001df0000000900ULL +#define CN6XXX_DPI_SLI_PRT1_CFG 0x0001df0000000908ULL +#define CN6XXX_DPI_SLI_PRTX_CFG(port) \ + (CN6XXX_DPI_SLI_PRT0_CFG + (port * 0x10)) + +#define CN6XXX_DPI_DMA_COMMIT_MODE BIT_ULL(58) +#define CN6XXX_DPI_DMA_PKT_HP BIT_ULL(57) +#define CN6XXX_DPI_DMA_PKT_EN BIT_ULL(56) +#define CN6XXX_DPI_DMA_O_ES BIT_ULL(15) +#define CN6XXX_DPI_DMA_O_MODE BIT_ULL(14) + +#define CN6XXX_DPI_DMA_CTL_MASK \ + (CN6XXX_DPI_DMA_COMMIT_MODE | \ + CN6XXX_DPI_DMA_PKT_HP | \ + CN6XXX_DPI_DMA_PKT_EN | \ + CN6XXX_DPI_DMA_O_ES | \ + CN6XXX_DPI_DMA_O_MODE) + +/*############################ CIU #########################*/ + +#define CN6XXX_CIU_SOFT_BIST 0x0001070000000738ULL +#define CN6XXX_CIU_SOFT_RST 0x0001070000000740ULL + +/*############################ MIO #########################*/ +#define CN6XXX_MIO_PTP_CLOCK_CFG 0x0001070000000f00ULL +#define CN6XXX_MIO_PTP_CLOCK_LO 0x0001070000000f08ULL +#define CN6XXX_MIO_PTP_CLOCK_HI 0x0001070000000f10ULL +#define CN6XXX_MIO_PTP_CLOCK_COMP 0x0001070000000f18ULL +#define CN6XXX_MIO_PTP_TIMESTAMP 0x0001070000000f20ULL +#define 
CN6XXX_MIO_PTP_EVT_CNT 0x0001070000000f28ULL +#define CN6XXX_MIO_PTP_CKOUT_THRESH_LO 0x0001070000000f30ULL +#define CN6XXX_MIO_PTP_CKOUT_THRESH_HI 0x0001070000000f38ULL +#define CN6XXX_MIO_PTP_CKOUT_HI_INCR 0x0001070000000f40ULL +#define CN6XXX_MIO_PTP_CKOUT_LO_INCR 0x0001070000000f48ULL +#define CN6XXX_MIO_PTP_PPS_THRESH_LO 0x0001070000000f50ULL +#define CN6XXX_MIO_PTP_PPS_THRESH_HI 0x0001070000000f58ULL +#define CN6XXX_MIO_PTP_PPS_HI_INCR 0x0001070000000f60ULL +#define CN6XXX_MIO_PTP_PPS_LO_INCR 0x0001070000000f68ULL + +#define CN6XXX_MIO_QLM4_CFG 0x00011800000015B0ULL +#define CN6XXX_MIO_RST_BOOT 0x0001180000001600ULL + +#define CN6XXX_MIO_QLM_CFG_MASK 0x7 + +/*############################ LMC #########################*/ + +#define CN6XXX_LMC0_RESET_CTL 0x0001180088000180ULL +#define CN6XXX_LMC0_RESET_CTL_DDR3RST_MASK 0x0000000000000001ULL + +#endif diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c new file mode 100644 index 000000000000..8e830d0c0754 --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c @@ -0,0 +1,198 @@ +/********************************************************************** +* Author: Cavium, Inc. +* +* Contact: support@cavium.com +* Please include "LiquidIO" in the subject. +* +* Copyright (c) 2003-2015 Cavium, Inc. +* +* This file is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License, Version 2, as +* published by the Free Software Foundation. +* +* This file is distributed in the hope that it will be useful, but +* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty +* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or +* NONINFRINGEMENT. See the GNU General Public License for more +* details. +* +* This file may also be available under a different license from Cavium. +* Contact Cavium, Inc. for more information +**********************************************************************/ +#include <linux/version.h> +#include <linux/types.h> +#include <linux/list.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/kthread.h> +#include <linux/netdevice.h> +#include "octeon_config.h" +#include "liquidio_common.h" +#include "octeon_droq.h" +#include "octeon_iq.h" +#include "response_manager.h" +#include "octeon_device.h" +#include "octeon_nic.h" +#include "octeon_main.h" +#include "octeon_network.h" +#include "cn66xx_regs.h" +#include "cn66xx_device.h" +#include "cn68xx_regs.h" +#include "cn68xx_device.h" +#include "liquidio_image.h" +#include "octeon_mem_ops.h" + +static void lio_cn68xx_set_dpi_regs(struct octeon_device *oct) +{ + u32 i; + u32 fifo_sizes[6] = { 3, 3, 1, 1, 1, 8 }; + + lio_pci_writeq(oct, CN6XXX_DPI_DMA_CTL_MASK, CN6XXX_DPI_DMA_CONTROL); + dev_dbg(&oct->pci_dev->dev, "DPI_DMA_CONTROL: 0x%016llx\n", + lio_pci_readq(oct, CN6XXX_DPI_DMA_CONTROL)); + + for (i = 0; i < 6; i++) { + /* Prevent service of instruction queue for all DMA engines + * Engine 5 will remain 0. Engines 0 - 4 will be setup by + * core. + */ + lio_pci_writeq(oct, 0, CN6XXX_DPI_DMA_ENG_ENB(i)); + lio_pci_writeq(oct, fifo_sizes[i], CN6XXX_DPI_DMA_ENG_BUF(i)); + dev_dbg(&oct->pci_dev->dev, "DPI_ENG_BUF%d: 0x%016llx\n", i, + lio_pci_readq(oct, CN6XXX_DPI_DMA_ENG_BUF(i))); + } + + /* DPI_SLI_PRT_CFG has MPS and MRRS settings that will be set + * separately. 
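 * (Editor's aside.)  "Set separately" refers to
 * lio_cn6xxx_setup_pcie_mps()/_mrrs(), which
 * lio_cn68xx_setup_device_regs() below calls with PCIE_MPS_DEFAULT and
 * PCIE_MRRS_256B.  Likewise, the NUMP field of CN68XX_SLI_TX_PIPE sits
 * in bits 23:16, which is why setup_pkt_ctl_regs() masks with
 * 0xffffffffff00ffff before OR-ing in max_oqs << 16.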
+ */ + + lio_pci_writeq(oct, 1, CN6XXX_DPI_CTL); + dev_dbg(&oct->pci_dev->dev, "DPI_CTL: 0x%016llx\n", + lio_pci_readq(oct, CN6XXX_DPI_CTL)); +} + +static int lio_cn68xx_soft_reset(struct octeon_device *oct) +{ + lio_cn6xxx_soft_reset(oct); + lio_cn68xx_set_dpi_regs(oct); + + return 0; +} + +static void lio_cn68xx_setup_pkt_ctl_regs(struct octeon_device *oct) +{ + struct octeon_cn6xxx *cn68xx = (struct octeon_cn6xxx *)oct->chip; + u64 pktctl, tx_pipe, max_oqs; + + pktctl = octeon_read_csr64(oct, CN6XXX_SLI_PKT_CTL); + + /* 68XX specific */ + max_oqs = CFG_GET_OQ_MAX_Q(CHIP_FIELD(oct, cn6xxx, conf)); + tx_pipe = octeon_read_csr64(oct, CN68XX_SLI_TX_PIPE); + tx_pipe &= 0xffffffffff00ffffULL; /* clear out NUMP field */ + tx_pipe |= max_oqs << 16; /* put max_oqs in NUMP field */ + octeon_write_csr64(oct, CN68XX_SLI_TX_PIPE, tx_pipe); + + if (CFG_GET_IS_SLI_BP_ON(cn68xx->conf)) + pktctl |= 0xF; + else + /* Disable per-port backpressure. */ + pktctl &= ~0xF; + octeon_write_csr64(oct, CN6XXX_SLI_PKT_CTL, pktctl); +} + +static int lio_cn68xx_setup_device_regs(struct octeon_device *oct) +{ + lio_cn6xxx_setup_pcie_mps(oct, PCIE_MPS_DEFAULT); + lio_cn6xxx_setup_pcie_mrrs(oct, PCIE_MRRS_256B); + lio_cn6xxx_enable_error_reporting(oct); + + lio_cn6xxx_setup_global_input_regs(oct); + lio_cn68xx_setup_pkt_ctl_regs(oct); + lio_cn6xxx_setup_global_output_regs(oct); + + /* Default error timeout value should be 0x200000 to avoid host hang + * when reads invalid register + */ + octeon_write_csr64(oct, CN6XXX_SLI_WINDOW_CTL, 0x200000ULL); + + return 0; +} + +static inline void lio_cn68xx_vendor_message_fix(struct octeon_device *oct) +{ + u32 val = 0; + + /* Set M_VEND1_DRP and M_VEND0_DRP bits */ + pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_FLTMSK, &val); + val |= 0x3; + pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_FLTMSK, val); +} + +int lio_is_210nv(struct octeon_device *oct) +{ + u64 mio_qlm4_cfg = lio_pci_readq(oct, CN6XXX_MIO_QLM4_CFG); + + return ((mio_qlm4_cfg & CN6XXX_MIO_QLM_CFG_MASK) == 0); +} + +int lio_setup_cn68xx_octeon_device(struct octeon_device *oct) +{ + struct octeon_cn6xxx *cn68xx = (struct octeon_cn6xxx *)oct->chip; + u16 card_type = LIO_410NV; + + if (octeon_map_pci_barx(oct, 0, 0)) + return 1; + + if (octeon_map_pci_barx(oct, 1, MAX_BAR1_IOREMAP_SIZE)) { + dev_err(&oct->pci_dev->dev, "%s CN68XX BAR1 map failed\n", + __func__); + octeon_unmap_pci_barx(oct, 0); + return 1; + } + + spin_lock_init(&cn68xx->lock_for_droq_int_enb_reg); + + oct->fn_list.setup_iq_regs = lio_cn6xxx_setup_iq_regs; + oct->fn_list.setup_oq_regs = lio_cn6xxx_setup_oq_regs; + + oct->fn_list.process_interrupt_regs = lio_cn6xxx_process_interrupt_regs; + oct->fn_list.soft_reset = lio_cn68xx_soft_reset; + oct->fn_list.setup_device_regs = lio_cn68xx_setup_device_regs; + oct->fn_list.reinit_regs = lio_cn6xxx_reinit_regs; + oct->fn_list.update_iq_read_idx = lio_cn6xxx_update_read_index; + + oct->fn_list.bar1_idx_setup = lio_cn6xxx_bar1_idx_setup; + oct->fn_list.bar1_idx_write = lio_cn6xxx_bar1_idx_write; + oct->fn_list.bar1_idx_read = lio_cn6xxx_bar1_idx_read; + + oct->fn_list.enable_interrupt = lio_cn6xxx_enable_interrupt; + oct->fn_list.disable_interrupt = lio_cn6xxx_disable_interrupt; + + oct->fn_list.enable_io_queues = lio_cn6xxx_enable_io_queues; + oct->fn_list.disable_io_queues = lio_cn6xxx_disable_io_queues; + + lio_cn6xxx_setup_reg_address(oct, oct->chip, &oct->reg_list); + + /* Determine variant of card */ + if (lio_is_210nv(oct)) + card_type = LIO_210NV; + + cn68xx->conf = (struct octeon_config *) + 
oct_get_config_info(oct, card_type); + if (!cn68xx->conf) { + dev_err(&oct->pci_dev->dev, "%s No Config found for CN68XX %s\n", + __func__, + (card_type == LIO_410NV) ? LIO_410NV_NAME : + LIO_210NV_NAME); + octeon_unmap_pci_barx(oct, 0); + octeon_unmap_pci_barx(oct, 1); + return 1; + } + + oct->coproc_clock_rate = 1000000ULL * lio_cn6xxx_coprocessor_clock(oct); + + lio_cn68xx_vendor_message_fix(oct); + + return 0; +} diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h new file mode 100644 index 000000000000..d4e1c9fb0bf2 --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h @@ -0,0 +1,33 @@ +/********************************************************************** +* Author: Cavium, Inc. +* +* Contact: support@cavium.com +* Please include "LiquidIO" in the subject. +* +* Copyright (c) 2003-2015 Cavium, Inc. +* +* This file is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License, Version 2, as +* published by the Free Software Foundation. +* +* This file is distributed in the hope that it will be useful, but +* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty +* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or +* NONINFRINGEMENT. See the GNU General Public License for more +* details. +* +* This file may also be available under a different license from Cavium. +* Contact Cavium, Inc. for more information +**********************************************************************/ + +/*! \file cn68xx_device.h + * \brief Host Driver: Routines that perform CN68XX specific operations. + */ + +#ifndef __CN68XX_DEVICE_H__ +#define __CN68XX_DEVICE_H__ + +int lio_setup_cn68xx_octeon_device(struct octeon_device *oct); +int lio_is_210nv(struct octeon_device *oct); + +#endif diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h b/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h new file mode 100644 index 000000000000..38cddbd107b6 --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h @@ -0,0 +1,51 @@ +/********************************************************************** +* Author: Cavium, Inc. +* +* Contact: support@cavium.com +* Please include "LiquidIO" in the subject. +* +* Copyright (c) 2003-2015 Cavium, Inc. +* +* This file is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License, Version 2, as +* published by the Free Software Foundation. +* +* This file is distributed in the hope that it will be useful, but +* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty +* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or +* NONINFRINGEMENT. See the GNU General Public License for more +* details. +* +* This file may also be available under a different license from Cavium. +* Contact Cavium, Inc. for more information +**********************************************************************/ + +/*! \file cn68xx_regs.h + * \brief Host Driver: Register Address and Register Mask values for + * Octeon CN68XX devices. The register map for CN66XX is the same + * for most registers. This file has the other registers that are + * 68XX-specific. 
+ */ + +#ifndef __CN68XX_REGS_H__ +#define __CN68XX_REGS_H__ +#include "cn66xx_regs.h" + +/*###################### REQUEST QUEUE #########################*/ + +#define CN68XX_SLI_IQ_PORT0_PKIND 0x0800 + +#define CN68XX_SLI_IQ_PORT_PKIND(iq) \ + (CN68XX_SLI_IQ_PORT0_PKIND + ((iq) * CN6XXX_IQ_OFFSET)) + +/*############################ OUTPUT QUEUE #########################*/ + +/* Starting pipe number and number of pipes used by the SLI packet output. */ +#define CN68XX_SLI_TX_PIPE 0x1230 + +/*######################## INTERRUPTS #########################*/ + +/*------------------ Interrupt Masks ----------------*/ +#define CN68XX_INTR_PIPE_ERR BIT_ULL(61) + +#endif diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c new file mode 100644 index 000000000000..160f8077692c --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c @@ -0,0 +1,1216 @@ +/********************************************************************** +* Author: Cavium, Inc. +* +* Contact: support@cavium.com +* Please include "LiquidIO" in the subject. +* +* Copyright (c) 2003-2015 Cavium, Inc. +* +* This file is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License, Version 2, as +* published by the Free Software Foundation. +* +* This file is distributed in the hope that it will be useful, but +* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty +* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or +* NONINFRINGEMENT. See the GNU General Public License for more +* details. +* +* This file may also be available under a different license from Cavium. +* Contact Cavium, Inc. for more information +**********************************************************************/ +#include <linux/version.h> +#include <linux/netdevice.h> +#include <linux/net_tstamp.h> +#include <linux/ethtool.h> +#include <linux/dma-mapping.h> +#include <linux/pci.h> +#include "octeon_config.h" +#include "liquidio_common.h" +#include "octeon_droq.h" +#include "octeon_iq.h" +#include "response_manager.h" +#include "octeon_device.h" +#include "octeon_nic.h" +#include "octeon_main.h" +#include "octeon_network.h" +#include "cn66xx_regs.h" +#include "cn66xx_device.h" +#include "cn68xx_regs.h" +#include "cn68xx_device.h" +#include "liquidio_image.h" + +struct oct_mdio_cmd_context { + int octeon_id; + wait_queue_head_t wc; + int cond; +}; + +struct oct_mdio_cmd_resp { + u64 rh; + struct oct_mdio_cmd resp; + u64 status; +}; + +#define OCT_MDIO45_RESP_SIZE (sizeof(struct oct_mdio_cmd_resp)) + +/* Octeon's interface mode of operation */ +enum { + INTERFACE_MODE_DISABLED, + INTERFACE_MODE_RGMII, + INTERFACE_MODE_GMII, + INTERFACE_MODE_SPI, + INTERFACE_MODE_PCIE, + INTERFACE_MODE_XAUI, + INTERFACE_MODE_SGMII, + INTERFACE_MODE_PICMG, + INTERFACE_MODE_NPI, + INTERFACE_MODE_LOOP, + INTERFACE_MODE_SRIO, + INTERFACE_MODE_ILK, + INTERFACE_MODE_RXAUI, + INTERFACE_MODE_QSGMII, + INTERFACE_MODE_AGL, +}; + +#define ARRAY_LENGTH(a) (sizeof(a) / sizeof((a)[0])) +#define OCT_ETHTOOL_REGDUMP_LEN 4096 +#define OCT_ETHTOOL_REGSVER 1 + +static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = { + "Instr posted", + "Instr processed", + "Instr dropped", + "Bytes Sent", + "Sgentry_sent", + "Inst cntreg", + "Tx done", + "Tx Iq busy", + "Tx dropped", + "Tx bytes", +}; + +static const char oct_droq_stats_strings[][ETH_GSTRING_LEN] = { + "OQ Pkts Received", + "OQ Bytes Received", + "Dropped no dispatch", + "Dropped nomem", + 
"Dropped toomany", + "Stack RX cnt", + "Stack RX Bytes", + "RX dropped", +}; + +#define OCTNIC_NCMD_AUTONEG_ON 0x1 +#define OCTNIC_NCMD_PHY_ON 0x2 + +static int lio_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct oct_link_info *linfo; + + linfo = &lio->linfo; + + if (linfo->link.s.interface == INTERFACE_MODE_XAUI || + linfo->link.s.interface == INTERFACE_MODE_RXAUI) { + ecmd->port = PORT_FIBRE; + ecmd->supported = + (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE | + SUPPORTED_Pause); + ecmd->advertising = + (ADVERTISED_10000baseT_Full | ADVERTISED_Pause); + ecmd->transceiver = XCVR_EXTERNAL; + ecmd->autoneg = AUTONEG_DISABLE; + + } else { + dev_err(&oct->pci_dev->dev, "Unknown link interface reported\n"); + } + + if (linfo->link.s.status) { + ethtool_cmd_speed_set(ecmd, linfo->link.s.speed); + ecmd->duplex = linfo->link.s.duplex; + } else { + ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); + ecmd->duplex = DUPLEX_UNKNOWN; + } + + return 0; +} + +static void +lio_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) +{ + struct lio *lio; + struct octeon_device *oct; + + lio = GET_LIO(netdev); + oct = lio->oct_dev; + + memset(drvinfo, 0, sizeof(struct ethtool_drvinfo)); + strcpy(drvinfo->driver, "liquidio"); + strcpy(drvinfo->version, LIQUIDIO_VERSION); + strncpy(drvinfo->fw_version, oct->fw_info.liquidio_firmware_version, + ETHTOOL_FWVERS_LEN); + strncpy(drvinfo->bus_info, pci_name(oct->pci_dev), 32); + drvinfo->regdump_len = OCT_ETHTOOL_REGDUMP_LEN; +} + +static void +lio_ethtool_get_channels(struct net_device *dev, + struct ethtool_channels *channel) +{ + struct lio *lio = GET_LIO(dev); + struct octeon_device *oct = lio->oct_dev; + u32 max_rx = 0, max_tx = 0, tx_count = 0, rx_count = 0; + + if (OCTEON_CN6XXX(oct)) { + struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf); + + max_rx = CFG_GET_OQ_MAX_Q(conf6x); + max_tx = CFG_GET_IQ_MAX_Q(conf6x); + rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx); + tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx); + } + + channel->max_rx = max_rx; + channel->max_tx = max_tx; + channel->rx_count = rx_count; + channel->tx_count = tx_count; +} + +static int lio_get_eeprom_len(struct net_device *netdev) +{ + u8 buf[128]; + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct_dev = lio->oct_dev; + struct octeon_board_info *board_info; + int len; + + board_info = (struct octeon_board_info *)(&oct_dev->boardinfo); + len = sprintf(buf, "boardname:%s serialnum:%s maj:%lld min:%lld\n", + board_info->name, board_info->serial_number, + board_info->major, board_info->minor); + + return len; +} + +static int +lio_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, + u8 *bytes) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct_dev = lio->oct_dev; + struct octeon_board_info *board_info; + int len; + + if (eeprom->offset != 0) + return -EINVAL; + + eeprom->magic = oct_dev->pci_dev->vendor; + board_info = (struct octeon_board_info *)(&oct_dev->boardinfo); + len = + sprintf((char *)bytes, + "boardname:%s serialnum:%s maj:%lld min:%lld\n", + board_info->name, board_info->serial_number, + board_info->major, board_info->minor); + + return 0; +} + +static int octnet_gpio_access(struct net_device *netdev, int addr, int val) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octnic_ctrl_pkt nctrl; + struct octnic_ctrl_params nparams; + int ret = 0; + + 
+	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+	nctrl.ncmd.u64 = 0;
+	nctrl.ncmd.s.cmd = OCTNET_CMD_GPIO_ACCESS;
+	nctrl.ncmd.s.param1 = lio->linfo.ifidx;
+	nctrl.ncmd.s.param2 = addr;
+	nctrl.ncmd.s.param3 = val;
+	nctrl.wait_time = 100;
+	nctrl.netpndev = (u64)netdev;
+	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+	nparams.resp_order = OCTEON_RESP_ORDERED;
+
+	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+	if (ret < 0) {
+		dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Callback for when mdio command response arrives
+ */
+static void octnet_mdio_resp_callback(struct octeon_device *oct,
+				      u32 status,
+				      void *buf)
+{
+	struct oct_mdio_cmd_resp *mdio_cmd_rsp;
+	struct oct_mdio_cmd_context *mdio_cmd_ctx;
+	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
+
+	mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
+	mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;
+
+	oct = lio_get_device(mdio_cmd_ctx->octeon_id);
+	if (status) {
+		dev_err(&oct->pci_dev->dev, "MDIO instruction failed. Status: %llx\n",
+			CVM_CAST64(status));
+		ACCESS_ONCE(mdio_cmd_ctx->cond) = -1;
+	} else {
+		ACCESS_ONCE(mdio_cmd_ctx->cond) = 1;
+	}
+	wake_up_interruptible(&mdio_cmd_ctx->wc);
+}
+
+/* This routine provides PHY access routines for
+ * MDIO Clause 45.
+ */
+static int
+octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
+{
+	struct octeon_device *oct_dev = lio->oct_dev;
+	struct octeon_soft_command *sc;
+	struct oct_mdio_cmd_resp *mdio_cmd_rsp;
+	struct oct_mdio_cmd_context *mdio_cmd_ctx;
+	struct oct_mdio_cmd *mdio_cmd;
+	int retval = 0;
+
+	sc = (struct octeon_soft_command *)
+		octeon_alloc_soft_command(oct_dev,
+					  sizeof(struct oct_mdio_cmd),
+					  sizeof(struct oct_mdio_cmd_resp),
+					  sizeof(struct oct_mdio_cmd_context));
+
+	if (!sc)
+		return -ENOMEM;
+
+	mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;
+	mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
+	mdio_cmd = (struct oct_mdio_cmd *)sc->virtdptr;
+
+	ACCESS_ONCE(mdio_cmd_ctx->cond) = 0;
+	mdio_cmd_ctx->octeon_id = lio_get_device_id(oct_dev);
+	mdio_cmd->op = op;
+	mdio_cmd->mdio_addr = loc;
+	if (op)
+		mdio_cmd->value1 = *value;
+	mdio_cmd->value2 = lio->linfo.ifidx;
+	octeon_swap_8B_data((u64 *)mdio_cmd, sizeof(struct oct_mdio_cmd) / 8);
+
+	octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_MDIO45,
+				    0, 0, 0);
+
+	sc->wait_time = 1000;
+	sc->callback = octnet_mdio_resp_callback;
+	sc->callback_arg = sc;
+
+	init_waitqueue_head(&mdio_cmd_ctx->wc);
+
+	retval = octeon_send_soft_command(oct_dev, sc);
+
+	if (retval) {
+		dev_err(&oct_dev->pci_dev->dev,
+			"octnet_mdio45_access instruction failed status: %x\n",
+			retval);
+		retval = -EBUSY;
+	} else {
+		/* Sleep on a wait queue till the cond flag indicates that the
+		 * response arrived
+		 */
+		sleep_cond(&mdio_cmd_ctx->wc, &mdio_cmd_ctx->cond);
+		retval = mdio_cmd_rsp->status;
+		if (retval) {
+			dev_err(&oct_dev->pci_dev->dev, "octnet mdio45 access failed\n");
+			retval = -EBUSY;
+		} else {
+			octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp),
+					    sizeof(struct oct_mdio_cmd) / 8);
+
+			if (ACCESS_ONCE(mdio_cmd_ctx->cond) == 1) {
+				if (!op)
+					*value = mdio_cmd_rsp->resp.value1;
+			} else {
+				retval = -EINVAL;
+			}
+		}
+	}
+
+	octeon_free_soft_command(oct_dev, sc);
+
+	return retval;
+}
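/*
 * octnet_mdio45_access() above is the driver's synchronous soft-command
 * idiom: the caller seeds a context with cond = 0, queues the command and
 * sleeps on a wait queue until octnet_mdio_resp_callback() flips cond and
 * wakes it. A minimal sketch of the wait half, assuming the driver's
 * sleep_cond() helper behaves like a standard wait-queue wait;
 * lio_wait_for_mdio_resp() is hypothetical, for illustration only.
 */
static int lio_wait_for_mdio_resp(struct oct_mdio_cmd_context *ctx)
{
	/* Block until the completion callback sets ctx->cond and wakes us:
	 * cond becomes 1 on success, -1 on a failed instruction.
	 */
	if (wait_event_interruptible(ctx->wc, ACCESS_ONCE(ctx->cond) != 0))
		return -EINTR;	/* interrupted by a signal */

	return (ACCESS_ONCE(ctx->cond) == 1) ? 0 : -EIO;
}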
+
+static int lio_set_phys_id(struct net_device *netdev,
+			   enum ethtool_phys_id_state state)
+{
+	struct lio *lio = GET_LIO(netdev);
+	struct octeon_device *oct = lio->oct_dev;
+	int value, ret;
+
+	switch (state) {
+	case ETHTOOL_ID_ACTIVE:
+		if (oct->chip_id == OCTEON_CN66XX) {
+			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
+					   VITESSE_PHY_GPIO_DRIVEON);
+			return 2;
+
+		} else if (oct->chip_id == OCTEON_CN68XX) {
+			/* Save the current LED settings */
+			ret = octnet_mdio45_access(lio, 0,
+						   LIO68XX_LED_BEACON_ADDR,
+						   &lio->phy_beacon_val);
+			if (ret)
+				return ret;
+
+			ret = octnet_mdio45_access(lio, 0,
+						   LIO68XX_LED_CTRL_ADDR,
+						   &lio->led_ctrl_val);
+			if (ret)
+				return ret;
+
+			/* Configure Beacon values */
+			value = LIO68XX_LED_BEACON_CFGON;
+			ret =
+				octnet_mdio45_access(lio, 1,
+						     LIO68XX_LED_BEACON_ADDR,
+						     &value);
+			if (ret)
+				return ret;
+
+			value = LIO68XX_LED_CTRL_CFGON;
+			ret =
+				octnet_mdio45_access(lio, 1,
+						     LIO68XX_LED_CTRL_ADDR,
+						     &value);
+			if (ret)
+				return ret;
+		} else {
+			return -EINVAL;
+		}
+		break;
+
+	case ETHTOOL_ID_ON:
+		if (oct->chip_id == OCTEON_CN66XX) {
+			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
+					   VITESSE_PHY_GPIO_HIGH);
+
+		} else if (oct->chip_id == OCTEON_CN68XX) {
+			return -EINVAL;
+		} else {
+			return -EINVAL;
+		}
+		break;
+
+	case ETHTOOL_ID_OFF:
+		if (oct->chip_id == OCTEON_CN66XX)
+			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
+					   VITESSE_PHY_GPIO_LOW);
+		else if (oct->chip_id == OCTEON_CN68XX)
+			return -EINVAL;
+		else
+			return -EINVAL;
+
+		break;
+
+	case ETHTOOL_ID_INACTIVE:
+		if (oct->chip_id == OCTEON_CN66XX) {
+			octnet_gpio_access(netdev, VITESSE_PHY_GPIO_CFG,
+					   VITESSE_PHY_GPIO_DRIVEOFF);
+		} else if (oct->chip_id == OCTEON_CN68XX) {
+			/* Restore LED settings */
+			ret = octnet_mdio45_access(lio, 1,
+						   LIO68XX_LED_CTRL_ADDR,
+						   &lio->led_ctrl_val);
+			if (ret)
+				return ret;
+
+			ret = octnet_mdio45_access(lio, 1,
+						   LIO68XX_LED_BEACON_ADDR,
+						   &lio->phy_beacon_val);
+			if (ret)
+				return ret;
+
+		} else {
+			return -EINVAL;
+		}
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void
+lio_ethtool_get_ringparam(struct net_device *netdev,
+			  struct ethtool_ringparam *ering)
+{
+	struct lio *lio = GET_LIO(netdev);
+	struct octeon_device *oct = lio->oct_dev;
+	u32 tx_max_pending = 0, rx_max_pending = 0, tx_pending = 0,
+	    rx_pending = 0;
+
+	if (OCTEON_CN6XXX(oct)) {
+		struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf);
+
+		tx_max_pending = CN6XXX_MAX_IQ_DESCRIPTORS;
+		rx_max_pending = CN6XXX_MAX_OQ_DESCRIPTORS;
+		rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf6x, lio->ifidx);
+		tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx);
+	}
+
+	if (lio->mtu > OCTNET_DEFAULT_FRM_SIZE) {
+		ering->rx_pending = 0;
+		ering->rx_max_pending = 0;
+		ering->rx_mini_pending = 0;
+		ering->rx_jumbo_pending = rx_pending;
+		ering->rx_mini_max_pending = 0;
+		ering->rx_jumbo_max_pending = rx_max_pending;
+	} else {
+		ering->rx_pending = rx_pending;
+		ering->rx_max_pending = rx_max_pending;
+		ering->rx_mini_pending = 0;
+		ering->rx_jumbo_pending = 0;
+		ering->rx_mini_max_pending = 0;
+		ering->rx_jumbo_max_pending = 0;
+	}
+
+	ering->tx_pending = tx_pending;
+	ering->tx_max_pending = tx_max_pending;
+}
+
+static u32 lio_get_msglevel(struct net_device *netdev)
+{
+	struct lio *lio = GET_LIO(netdev);
+
+	return lio->msg_enable;
+}
+
+static void lio_set_msglevel(struct net_device *netdev, u32 msglvl)
+{
+	struct lio *lio = GET_LIO(netdev);
+
+	if ((msglvl ^ lio->msg_enable) & NETIF_MSG_HW) {
+		if (msglvl & NETIF_MSG_HW)
+			liquidio_set_feature(netdev,
+					     OCTNET_CMD_VERBOSE_ENABLE);
+		else
+			liquidio_set_feature(netdev,
+					     OCTNET_CMD_VERBOSE_DISABLE);
+	}
+
+	lio->msg_enable = msglvl;
+}
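/*
 * A note on the phys-id contract above: for ETHTOOL_ID_ACTIVE a positive
 * return value is treated by the ethtool core as a blink frequency, so the
 * CN66XX path's "return 2" asks the core to keep calling back with
 * ETHTOOL_ID_ON/ETHTOOL_ID_OFF for a 2 Hz blink, while the CN68XX path
 * lets the PHY beacon blink on its own until ETHTOOL_ID_INACTIVE restores
 * the saved LED registers. Typical trigger from userspace (the interface
 * name is only an example):
 *
 *	ethtool -p eth0 10	# blink the identify LED for 10 seconds
 */
+
+static void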
+lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) +{ + /* Notes: Not supporting any auto negotiation in these + * drivers. Just report pause frame support. + */ + pause->tx_pause = 1; + pause->rx_pause = 1; /* TODO: Need to support RX pause frame!!. */ +} + +static void +lio_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct_dev = lio->oct_dev; + int i = 0, j; + + for (j = 0; j < MAX_OCTEON_INSTR_QUEUES; j++) { + if (!(oct_dev->io_qmask.iq & (1UL << j))) + continue; + data[i++] = + CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted); + data[i++] = + CVM_CAST64( + oct_dev->instr_queue[j]->stats.instr_processed); + data[i++] = + CVM_CAST64( + oct_dev->instr_queue[j]->stats.instr_dropped); + data[i++] = + CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent); + data[i++] = + CVM_CAST64(oct_dev->instr_queue[j]->stats.sgentry_sent); + data[i++] = + readl(oct_dev->instr_queue[j]->inst_cnt_reg); + data[i++] = + CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done); + data[i++] = + CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy); + data[i++] = + CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped); + data[i++] = + CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes); + } + + /* for (j = 0; j < oct_dev->num_oqs; j++){ */ + for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES; j++) { + if (!(oct_dev->io_qmask.oq & (1UL << j))) + continue; + data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.pkts_received); + data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.bytes_received); + data[i++] = + CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch); + data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem); + data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany); + data[i++] = + CVM_CAST64(oct_dev->droq[j]->stats.rx_pkts_received); + data[i++] = + CVM_CAST64(oct_dev->droq[j]->stats.rx_bytes_received); + data[i++] = + CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped); + } +} + +static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct_dev = lio->oct_dev; + int num_iq_stats, num_oq_stats, i, j; + + num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings); + for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) { + if (!(oct_dev->io_qmask.iq & (1UL << i))) + continue; + for (j = 0; j < num_iq_stats; j++) { + sprintf(data, "IQ%d %s", i, oct_iq_stats_strings[j]); + data += ETH_GSTRING_LEN; + } + } + + num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings); + /* for (i = 0; i < oct_dev->num_oqs; i++) { */ + for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) { + if (!(oct_dev->io_qmask.oq & (1UL << i))) + continue; + for (j = 0; j < num_oq_stats; j++) { + sprintf(data, "OQ%d %s", i, oct_droq_stats_strings[j]); + data += ETH_GSTRING_LEN; + } + } +} + +static int lio_get_sset_count(struct net_device *netdev, int sset) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct_dev = lio->oct_dev; + + return (ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs) + + (ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs); +} + +static int lio_get_intr_coalesce(struct net_device *netdev, + struct ethtool_coalesce *intr_coal) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip; + struct octeon_instr_queue *iq; + struct oct_intrmod_cfg *intrmod_cfg; + + intrmod_cfg = &oct->intrmod; + + switch (oct->chip_id) { + 
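/*
 * On the ethtool stats trio above (lio_get_sset_count / lio_get_strings /
 * lio_get_ethtool_stats): the core allocates exactly get_sset_count()
 * slots, so the three must stay in lockstep. The count is derived from
 * num_iqs/num_oqs while the fill loops walk io_qmask, which only works if
 * the mask population matches those counts; a debug assertion along these
 * lines (a sketch, assuming the io_qmask fields are plain bitmasks) would
 * make the invariant explicit:
 *
 *	WARN_ON(hweight64(oct_dev->io_qmask.iq) != oct_dev->num_iqs);
 *	WARN_ON(hweight64(oct_dev->io_qmask.oq) != oct_dev->num_oqs);
 */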
/* case OCTEON_CN73XX: Todo */ + /* break; */ + case OCTEON_CN68XX: + case OCTEON_CN66XX: + if (!intrmod_cfg->intrmod_enable) { + intr_coal->rx_coalesce_usecs = + CFG_GET_OQ_INTR_TIME(cn6xxx->conf); + intr_coal->rx_max_coalesced_frames = + CFG_GET_OQ_INTR_PKT(cn6xxx->conf); + } else { + intr_coal->use_adaptive_rx_coalesce = + intrmod_cfg->intrmod_enable; + intr_coal->rate_sample_interval = + intrmod_cfg->intrmod_check_intrvl; + intr_coal->pkt_rate_high = + intrmod_cfg->intrmod_maxpkt_ratethr; + intr_coal->pkt_rate_low = + intrmod_cfg->intrmod_minpkt_ratethr; + intr_coal->rx_max_coalesced_frames_high = + intrmod_cfg->intrmod_maxcnt_trigger; + intr_coal->rx_coalesce_usecs_high = + intrmod_cfg->intrmod_maxtmr_trigger; + intr_coal->rx_coalesce_usecs_low = + intrmod_cfg->intrmod_mintmr_trigger; + intr_coal->rx_max_coalesced_frames_low = + intrmod_cfg->intrmod_mincnt_trigger; + } + + iq = oct->instr_queue[lio->linfo.txpciq[0]]; + intr_coal->tx_max_coalesced_frames = iq->fill_threshold; + break; + + default: + netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n"); + return -EINVAL; + } + + return 0; +} + +/* Callback function for intrmod */ +static void octnet_intrmod_callback(struct octeon_device *oct_dev, + u32 status, + void *ptr) +{ + struct oct_intrmod_cmd *cmd = ptr; + struct octeon_soft_command *sc = cmd->sc; + + oct_dev = cmd->oct_dev; + + if (status) + dev_err(&oct_dev->pci_dev->dev, "intrmod config failed. Status: %llx\n", + CVM_CAST64(status)); + else + dev_info(&oct_dev->pci_dev->dev, + "Rx-Adaptive Interrupt moderation enabled:%llx\n", + oct_dev->intrmod.intrmod_enable); + + octeon_free_soft_command(oct_dev, sc); +} + +/* Configure interrupt moderation parameters */ +static int octnet_set_intrmod_cfg(void *oct, struct oct_intrmod_cfg *intr_cfg) +{ + struct octeon_soft_command *sc; + struct oct_intrmod_cmd *cmd; + struct oct_intrmod_cfg *cfg; + int retval; + struct octeon_device *oct_dev = (struct octeon_device *)oct; + + /* Alloc soft command */ + sc = (struct octeon_soft_command *) + octeon_alloc_soft_command(oct_dev, + sizeof(struct oct_intrmod_cfg), + 0, + sizeof(struct oct_intrmod_cmd)); + + if (!sc) + return -ENOMEM; + + cmd = (struct oct_intrmod_cmd *)sc->ctxptr; + cfg = (struct oct_intrmod_cfg *)sc->virtdptr; + + memcpy(cfg, intr_cfg, sizeof(struct oct_intrmod_cfg)); + octeon_swap_8B_data((u64 *)cfg, (sizeof(struct oct_intrmod_cfg)) / 8); + cmd->sc = sc; + cmd->cfg = cfg; + cmd->oct_dev = oct_dev; + + octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, + OPCODE_NIC_INTRMOD_CFG, 0, 0, 0); + + sc->callback = octnet_intrmod_callback; + sc->callback_arg = cmd; + sc->wait_time = 1000; + + retval = octeon_send_soft_command(oct_dev, sc); + if (retval) { + octeon_free_soft_command(oct_dev, sc); + return -EINVAL; + } + + return 0; +} + +/* Enable/Disable auto interrupt Moderation */ +static int oct_cfg_adaptive_intr(struct lio *lio, struct ethtool_coalesce + *intr_coal, int adaptive) +{ + int ret = 0; + struct octeon_device *oct = lio->oct_dev; + struct oct_intrmod_cfg *intrmod_cfg; + + intrmod_cfg = &oct->intrmod; + + if (adaptive) { + if (intr_coal->rate_sample_interval) + intrmod_cfg->intrmod_check_intrvl = + intr_coal->rate_sample_interval; + else + intrmod_cfg->intrmod_check_intrvl = + LIO_INTRMOD_CHECK_INTERVAL; + + if (intr_coal->pkt_rate_high) + intrmod_cfg->intrmod_maxpkt_ratethr = + intr_coal->pkt_rate_high; + else + intrmod_cfg->intrmod_maxpkt_ratethr = + LIO_INTRMOD_MAXPKT_RATETHR; + + if (intr_coal->pkt_rate_low) + intrmod_cfg->intrmod_minpkt_ratethr = + 
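/*
 * Both octnet_set_intrmod_cfg() above and the MDIO path earlier call
 * octeon_swap_8B_data() before handing a buffer to the firmware. That
 * helper is assumed to do an in-place 64-bit byte swap so the big-endian
 * Octeon core and a little-endian host agree on word layout, i.e. roughly:
 *
 *	static void swap_8B_data_sketch(u64 *data, u32 blocks)
 *	{
 *		while (blocks--) {
 *			cpu_to_be64s(data);	// in-place swap on LE hosts
 *			data++;
 *		}
 *	}
 */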
intr_coal->pkt_rate_low; + else + intrmod_cfg->intrmod_minpkt_ratethr = + LIO_INTRMOD_MINPKT_RATETHR; + + if (intr_coal->rx_max_coalesced_frames_high) + intrmod_cfg->intrmod_maxcnt_trigger = + intr_coal->rx_max_coalesced_frames_high; + else + intrmod_cfg->intrmod_maxcnt_trigger = + LIO_INTRMOD_MAXCNT_TRIGGER; + + if (intr_coal->rx_coalesce_usecs_high) + intrmod_cfg->intrmod_maxtmr_trigger = + intr_coal->rx_coalesce_usecs_high; + else + intrmod_cfg->intrmod_maxtmr_trigger = + LIO_INTRMOD_MAXTMR_TRIGGER; + + if (intr_coal->rx_coalesce_usecs_low) + intrmod_cfg->intrmod_mintmr_trigger = + intr_coal->rx_coalesce_usecs_low; + else + intrmod_cfg->intrmod_mintmr_trigger = + LIO_INTRMOD_MINTMR_TRIGGER; + + if (intr_coal->rx_max_coalesced_frames_low) + intrmod_cfg->intrmod_mincnt_trigger = + intr_coal->rx_max_coalesced_frames_low; + else + intrmod_cfg->intrmod_mincnt_trigger = + LIO_INTRMOD_MINCNT_TRIGGER; + } + + intrmod_cfg->intrmod_enable = adaptive; + ret = octnet_set_intrmod_cfg(oct, intrmod_cfg); + + return ret; +} + +static int +oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal) +{ + int ret; + struct octeon_device *oct = lio->oct_dev; + struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip; + u32 rx_max_coalesced_frames; + + if (!intr_coal->rx_max_coalesced_frames) + rx_max_coalesced_frames = CN6XXX_OQ_INTR_PKT; + else + rx_max_coalesced_frames = intr_coal->rx_max_coalesced_frames; + + /* Disable adaptive interrupt modulation */ + ret = oct_cfg_adaptive_intr(lio, intr_coal, 0); + if (ret) + return ret; + + /* Config Cnt based interrupt values */ + octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS, + rx_max_coalesced_frames); + CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames); + return 0; +} + +static int oct_cfg_rx_intrtime(struct lio *lio, struct ethtool_coalesce + *intr_coal) +{ + int ret; + struct octeon_device *oct = lio->oct_dev; + struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip; + u32 time_threshold, rx_coalesce_usecs; + + if (!intr_coal->rx_coalesce_usecs) + rx_coalesce_usecs = CN6XXX_OQ_INTR_TIME; + else + rx_coalesce_usecs = intr_coal->rx_coalesce_usecs; + + /* Disable adaptive interrupt modulation */ + ret = oct_cfg_adaptive_intr(lio, intr_coal, 0); + if (ret) + return ret; + + /* Config Time based interrupt values */ + time_threshold = lio_cn6xxx_get_oq_ticks(oct, rx_coalesce_usecs); + octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_TIME, time_threshold); + CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs); + + return 0; +} + +static int lio_set_intr_coalesce(struct net_device *netdev, + struct ethtool_coalesce *intr_coal) +{ + struct lio *lio = GET_LIO(netdev); + int ret; + struct octeon_device *oct = lio->oct_dev; + u32 j, q_no; + + if ((intr_coal->tx_max_coalesced_frames >= CN6XXX_DB_MIN) && + (intr_coal->tx_max_coalesced_frames <= CN6XXX_DB_MAX)) { + for (j = 0; j < lio->linfo.num_txpciq; j++) { + q_no = lio->linfo.txpciq[j]; + oct->instr_queue[q_no]->fill_threshold = + intr_coal->tx_max_coalesced_frames; + } + } else { + dev_err(&oct->pci_dev->dev, + "LIQUIDIO: Invalid tx-frames:%d. 
Range is min:%d max:%d\n", + intr_coal->tx_max_coalesced_frames, CN6XXX_DB_MIN, + CN6XXX_DB_MAX); + return -EINVAL; + } + + /* User requested adaptive-rx on */ + if (intr_coal->use_adaptive_rx_coalesce) { + ret = oct_cfg_adaptive_intr(lio, intr_coal, 1); + if (ret) + goto ret_intrmod; + } + + /* User requested adaptive-rx off and rx coalesce */ + if ((intr_coal->rx_coalesce_usecs) && + (!intr_coal->use_adaptive_rx_coalesce)) { + ret = oct_cfg_rx_intrtime(lio, intr_coal); + if (ret) + goto ret_intrmod; + } + + /* User requested adaptive-rx off and rx coalesce */ + if ((intr_coal->rx_max_coalesced_frames) && + (!intr_coal->use_adaptive_rx_coalesce)) { + ret = oct_cfg_rx_intrcnt(lio, intr_coal); + if (ret) + goto ret_intrmod; + } + + /* User requested adaptive-rx off, so use default coalesce params */ + if ((!intr_coal->rx_max_coalesced_frames) && + (!intr_coal->use_adaptive_rx_coalesce) && + (!intr_coal->rx_coalesce_usecs)) { + dev_info(&oct->pci_dev->dev, + "Turning off adaptive-rx interrupt moderation\n"); + dev_info(&oct->pci_dev->dev, + "Using RX Coalesce Default values rx_coalesce_usecs:%d rx_max_coalesced_frames:%d\n", + CN6XXX_OQ_INTR_TIME, CN6XXX_OQ_INTR_PKT); + ret = oct_cfg_rx_intrtime(lio, intr_coal); + if (ret) + goto ret_intrmod; + + ret = oct_cfg_rx_intrcnt(lio, intr_coal); + if (ret) + goto ret_intrmod; + } + + return 0; +ret_intrmod: + return ret; +} + +static int lio_get_ts_info(struct net_device *netdev, + struct ethtool_ts_info *info) +{ + struct lio *lio = GET_LIO(netdev); + + info->so_timestamping = + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_RAW_HARDWARE; + + if (lio->ptp_clock) + info->phc_index = ptp_clock_index(lio->ptp_clock); + else + info->phc_index = -1; + + info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); + + info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT); + + return 0; +} + +static int lio_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct oct_link_info *linfo; + struct octnic_ctrl_pkt nctrl; + struct octnic_ctrl_params nparams; + int ret = 0; + + /* get the link info */ + linfo = &lio->linfo; + + if (ecmd->autoneg != AUTONEG_ENABLE && ecmd->autoneg != AUTONEG_DISABLE) + return -EINVAL; + + if (ecmd->autoneg == AUTONEG_DISABLE && ((ecmd->speed != SPEED_100 && + ecmd->speed != SPEED_10) || + (ecmd->duplex != DUPLEX_HALF && + ecmd->duplex != DUPLEX_FULL))) + return -EINVAL; + + /* Ethtool Support is not provided for XAUI and RXAUI Interfaces + * as they operate at fixed Speed and Duplex settings + */ + if (linfo->link.s.interface == INTERFACE_MODE_XAUI || + linfo->link.s.interface == INTERFACE_MODE_RXAUI) { + dev_info(&oct->pci_dev->dev, "XAUI IFs settings cannot be modified.\n"); + return -EINVAL; + } + + memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); + + nctrl.ncmd.u64 = 0; + nctrl.ncmd.s.cmd = OCTNET_CMD_SET_SETTINGS; + nctrl.wait_time = 1000; + nctrl.netpndev = (u64)netdev; + nctrl.ncmd.s.param1 = lio->linfo.ifidx; + nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; + + /* Passing the parameters sent by ethtool like Speed, Autoneg & Duplex + * to SE core application using ncmd.s.more & ncmd.s.param + */ + if (ecmd->autoneg == AUTONEG_ENABLE) { + /* Autoneg ON */ + 
nctrl.ncmd.s.more = OCTNIC_NCMD_PHY_ON | + OCTNIC_NCMD_AUTONEG_ON; + nctrl.ncmd.s.param2 = ecmd->advertising; + } else { + /* Autoneg OFF */ + nctrl.ncmd.s.more = OCTNIC_NCMD_PHY_ON; + + nctrl.ncmd.s.param3 = ecmd->duplex; + + nctrl.ncmd.s.param2 = ecmd->speed; + } + + nparams.resp_order = OCTEON_RESP_ORDERED; + + ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams); + if (ret < 0) { + dev_err(&oct->pci_dev->dev, "Failed to set settings\n"); + return -1; + } + + return 0; +} + +static int lio_nway_reset(struct net_device *netdev) +{ + if (netif_running(netdev)) { + struct ethtool_cmd ecmd; + + memset(&ecmd, 0, sizeof(struct ethtool_cmd)); + ecmd.autoneg = 0; + ecmd.speed = 0; + ecmd.duplex = 0; + lio_set_settings(netdev, &ecmd); + } + return 0; +} + +/* Return register dump len. */ +static int lio_get_regs_len(struct net_device *dev) +{ + return OCT_ETHTOOL_REGDUMP_LEN; +} + +static int cn6xxx_read_csr_reg(char *s, struct octeon_device *oct) +{ + u32 reg; + int i, len = 0; + + /* PCI Window Registers */ + + len += sprintf(s + len, "\n\t Octeon CSR Registers\n\n"); + reg = CN6XXX_WIN_WR_ADDR_LO; + len += sprintf(s + len, "\n[%02x] (WIN_WR_ADDR_LO): %08x\n", + CN6XXX_WIN_WR_ADDR_LO, octeon_read_csr(oct, reg)); + reg = CN6XXX_WIN_WR_ADDR_HI; + len += sprintf(s + len, "[%02x] (WIN_WR_ADDR_HI): %08x\n", + CN6XXX_WIN_WR_ADDR_HI, octeon_read_csr(oct, reg)); + reg = CN6XXX_WIN_RD_ADDR_LO; + len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_LO): %08x\n", + CN6XXX_WIN_RD_ADDR_LO, octeon_read_csr(oct, reg)); + reg = CN6XXX_WIN_RD_ADDR_HI; + len += sprintf(s + len, "[%02x] (WIN_RD_ADDR_HI): %08x\n", + CN6XXX_WIN_RD_ADDR_HI, octeon_read_csr(oct, reg)); + reg = CN6XXX_WIN_WR_DATA_LO; + len += sprintf(s + len, "[%02x] (WIN_WR_DATA_LO): %08x\n", + CN6XXX_WIN_WR_DATA_LO, octeon_read_csr(oct, reg)); + reg = CN6XXX_WIN_WR_DATA_HI; + len += sprintf(s + len, "[%02x] (WIN_WR_DATA_HI): %08x\n", + CN6XXX_WIN_WR_DATA_HI, octeon_read_csr(oct, reg)); + len += sprintf(s + len, "[%02x] (WIN_WR_MASK_REG): %08x\n", + CN6XXX_WIN_WR_MASK_REG, + octeon_read_csr(oct, CN6XXX_WIN_WR_MASK_REG)); + + /* PCI Interrupt Register */ + len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 0): %08x\n", + CN6XXX_SLI_INT_ENB64_PORT0, octeon_read_csr(oct, + CN6XXX_SLI_INT_ENB64_PORT0)); + len += sprintf(s + len, "\n[%x] (INT_ENABLE PORT 1): %08x\n", + CN6XXX_SLI_INT_ENB64_PORT1, + octeon_read_csr(oct, CN6XXX_SLI_INT_ENB64_PORT1)); + len += sprintf(s + len, "[%x] (INT_SUM): %08x\n", CN6XXX_SLI_INT_SUM64, + octeon_read_csr(oct, CN6XXX_SLI_INT_SUM64)); + + /* PCI Output queue registers */ + for (i = 0; i < oct->num_oqs; i++) { + reg = CN6XXX_SLI_OQ_PKTS_SENT(i); + len += sprintf(s + len, "\n[%x] (PKTS_SENT_%d): %08x\n", + reg, i, octeon_read_csr(oct, reg)); + reg = CN6XXX_SLI_OQ_PKTS_CREDIT(i); + len += sprintf(s + len, "[%x] (PKT_CREDITS_%d): %08x\n", + reg, i, octeon_read_csr(oct, reg)); + } + reg = CN6XXX_SLI_OQ_INT_LEVEL_PKTS; + len += sprintf(s + len, "\n[%x] (PKTS_SENT_INT_LEVEL): %08x\n", + reg, octeon_read_csr(oct, reg)); + reg = CN6XXX_SLI_OQ_INT_LEVEL_TIME; + len += sprintf(s + len, "[%x] (PKTS_SENT_TIME): %08x\n", + reg, octeon_read_csr(oct, reg)); + + /* PCI Input queue registers */ + for (i = 0; i <= 3; i++) { + u32 reg; + + reg = CN6XXX_SLI_IQ_DOORBELL(i); + len += sprintf(s + len, "\n[%x] (INSTR_DOORBELL_%d): %08x\n", + reg, i, octeon_read_csr(oct, reg)); + reg = CN6XXX_SLI_IQ_INSTR_COUNT(i); + len += sprintf(s + len, "[%x] (INSTR_COUNT_%d): %08x\n", + reg, i, octeon_read_csr(oct, reg)); + } + + /* PCI DMA registers */ + + 
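/*
 * A caution on this dump routine: lio_get_regs_len() promises a fixed
 * OCT_ETHTOOL_REGDUMP_LEN (4096) bytes, and the sprintf() calls above and
 * below accumulate into that buffer unchecked, so the emitted text must
 * stay comfortably under the cap. A bounded variant would thread the
 * remaining space through snprintf(), e.g.:
 *
 *	len += snprintf(s + len, OCT_ETHTOOL_REGDUMP_LEN - len,
 *			"[%02x] (WIN_WR_ADDR_LO): %08x\n",
 *			CN6XXX_WIN_WR_ADDR_LO, octeon_read_csr(oct, reg));
 */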
+	len += sprintf(s + len, "\n[%x] (DMA_CNT_0): %08x\n",
+		       CN6XXX_DMA_CNT(0),
+		       octeon_read_csr(oct, CN6XXX_DMA_CNT(0)));
+	reg = CN6XXX_DMA_PKT_INT_LEVEL(0);
+	len += sprintf(s + len, "[%x] (DMA_INT_LEV_0): %08x\n",
+		       CN6XXX_DMA_PKT_INT_LEVEL(0), octeon_read_csr(oct, reg));
+	reg = CN6XXX_DMA_TIME_INT_LEVEL(0);
+	len += sprintf(s + len, "[%x] (DMA_TIME_0): %08x\n",
+		       CN6XXX_DMA_TIME_INT_LEVEL(0),
+		       octeon_read_csr(oct, reg));
+
+	len += sprintf(s + len, "\n[%x] (DMA_CNT_1): %08x\n",
+		       CN6XXX_DMA_CNT(1),
+		       octeon_read_csr(oct, CN6XXX_DMA_CNT(1)));
+	reg = CN6XXX_DMA_PKT_INT_LEVEL(1);
+	len += sprintf(s + len, "[%x] (DMA_INT_LEV_1): %08x\n",
+		       CN6XXX_DMA_PKT_INT_LEVEL(1),
+		       octeon_read_csr(oct, reg));
+	reg = CN6XXX_DMA_TIME_INT_LEVEL(1);
+	len += sprintf(s + len, "[%x] (DMA_TIME_1): %08x\n",
+		       CN6XXX_DMA_TIME_INT_LEVEL(1),
+		       octeon_read_csr(oct, reg));
+
+	/* PCI Index registers */
+
+	len += sprintf(s + len, "\n");
+
+	for (i = 0; i < 16; i++) {
+		reg = lio_pci_readq(oct, CN6XXX_BAR1_REG(i, oct->pcie_port));
+		len += sprintf(s + len, "[%llx] (BAR1_INDEX_%02d): %08x\n",
+			       CN6XXX_BAR1_REG(i, oct->pcie_port), i, reg);
+	}
+
+	return len;
+}
+
+static int cn6xxx_read_config_reg(char *s, struct octeon_device *oct)
+{
+	u32 val;
+	int i, len = 0;
+
+	/* PCI CONFIG Registers */
+
+	len += sprintf(s + len,
+		       "\n\t Octeon Config space Registers\n\n");
+
+	for (i = 0; i <= 13; i++) {
+		pci_read_config_dword(oct->pci_dev, (i * 4), &val);
+		len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
+			       (i * 4), i, val);
+	}
+
+	for (i = 30; i <= 34; i++) {
+		pci_read_config_dword(oct->pci_dev, (i * 4), &val);
+		len += sprintf(s + len, "[0x%x] (Config[%d]): 0x%08x\n",
+			       (i * 4), i, val);
+	}
+
+	return len;
+}
+
+/* Return register dump to user app. */
+static void lio_get_regs(struct net_device *dev,
+			 struct ethtool_regs *regs, void *regbuf)
+{
+	struct lio *lio = GET_LIO(dev);
+	int len = 0;
+	struct octeon_device *oct = lio->oct_dev;
+
+	memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN);
+	regs->version = OCT_ETHTOOL_REGSVER;
+
+	switch (oct->chip_id) {
+	/* case OCTEON_CN73XX: Todo */
+	case OCTEON_CN68XX:
+	case OCTEON_CN66XX:
+		len += cn6xxx_read_csr_reg(regbuf + len, oct);
+		len += cn6xxx_read_config_reg(regbuf + len, oct);
+		break;
+	default:
+		dev_err(&oct->pci_dev->dev, "%s Unknown chipid: %d\n",
+			__func__, oct->chip_id);
+	}
+}
+
+static const struct ethtool_ops lio_ethtool_ops = {
+	.get_settings = lio_get_settings,
+	.get_link = ethtool_op_get_link,
+	.get_drvinfo = lio_get_drvinfo,
+	.get_ringparam = lio_ethtool_get_ringparam,
+	.get_channels = lio_ethtool_get_channels,
+	.set_phys_id = lio_set_phys_id,
+	.get_eeprom_len = lio_get_eeprom_len,
+	.get_eeprom = lio_get_eeprom,
+	.get_strings = lio_get_strings,
+	.get_ethtool_stats = lio_get_ethtool_stats,
+	.get_pauseparam = lio_get_pauseparam,
+	.get_regs_len = lio_get_regs_len,
+	.get_regs = lio_get_regs,
+	.get_msglevel = lio_get_msglevel,
+	.set_msglevel = lio_set_msglevel,
+	.get_sset_count = lio_get_sset_count,
+	.nway_reset = lio_nway_reset,
+	.set_settings = lio_set_settings,
+	.get_coalesce = lio_get_intr_coalesce,
+	.set_coalesce = lio_set_intr_coalesce,
+	.get_ts_info = lio_get_ts_info,
+};
+
+void liquidio_set_ethtool_ops(struct net_device *netdev)
+{
+	netdev->ethtool_ops = &lio_ethtool_ops;
+}
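/*
 * The ops table above is exported through liquidio_set_ethtool_ops(); the
 * NIC setup path in lio_main.c below is expected to attach it once per
 * net_device, conventionally before registration. A minimal sketch (the
 * wrapper name is hypothetical; register_netdev() is the stock kernel API):
 */
static int lio_register_one_netdev(struct net_device *netdev)
{
	liquidio_set_ethtool_ops(netdev);	/* attach &lio_ethtool_ops */
	return register_netdev(netdev);
}
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
new file mode 100644
index 000000000000..c75f51737997
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -0,0 +1,3667 @@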
+/********************************************************************** +* Author: Cavium, Inc. +* +* Contact: support@cavium.com +* Please include "LiquidIO" in the subject. +* +* Copyright (c) 2003-2015 Cavium, Inc. +* +* This file is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License, Version 2, as +* published by the Free Software Foundation. +* +* This file is distributed in the hope that it will be useful, but +* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty +* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or +* NONINFRINGEMENT. See the GNU General Public License for more +* details. +* +* This file may also be available under a different license from Cavium. +* Contact Cavium, Inc. for more information +**********************************************************************/ +#include <linux/version.h> +#include <linux/module.h> +#include <linux/crc32.h> +#include <linux/dma-mapping.h> +#include <linux/pci.h> +#include <linux/pci_ids.h> +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/net_tstamp.h> +#include <linux/if_vlan.h> +#include <linux/firmware.h> +#include <linux/ethtool.h> +#include <linux/ptp_clock_kernel.h> +#include <linux/types.h> +#include <linux/list.h> +#include <linux/workqueue.h> +#include <linux/interrupt.h> +#include "octeon_config.h" +#include "liquidio_common.h" +#include "octeon_droq.h" +#include "octeon_iq.h" +#include "response_manager.h" +#include "octeon_device.h" +#include "octeon_nic.h" +#include "octeon_main.h" +#include "octeon_network.h" +#include "cn66xx_regs.h" +#include "cn66xx_device.h" +#include "cn68xx_regs.h" +#include "cn68xx_device.h" +#include "liquidio_image.h" + +MODULE_AUTHOR("Cavium Networks, <support@cavium.com>"); +MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(LIQUIDIO_VERSION); +MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME LIO_FW_NAME_SUFFIX); +MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME LIO_FW_NAME_SUFFIX); +MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME LIO_FW_NAME_SUFFIX); + +static int ddr_timeout = 10000; +module_param(ddr_timeout, int, 0644); +MODULE_PARM_DESC(ddr_timeout, + "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to non-zero value before starting to check"); + +static u32 console_bitmask; +module_param(console_bitmask, int, 0644); +MODULE_PARM_DESC(console_bitmask, + "Bitmask indicating which consoles have debug output redirected to syslog."); + +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) + +static int debug = -1; +module_param(debug, int, 0644); +MODULE_PARM_DESC(debug, "NETIF_MSG debug bits"); + +static char fw_type[LIO_MAX_FW_TYPE_LEN]; +module_param_string(fw_type, fw_type, sizeof(fw_type), 0000); +MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded. 
Default \"nic\""); + +static int conf_type; +module_param(conf_type, int, 0); +MODULE_PARM_DESC(conf_type, "select octeon configuration 0 default 1 ovs"); + +/* Bit mask values for lio->ifstate */ +#define LIO_IFSTATE_DROQ_OPS 0x01 +#define LIO_IFSTATE_REGISTERED 0x02 +#define LIO_IFSTATE_RUNNING 0x04 +#define LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08 + +/* Polling interval for determining when NIC application is alive */ +#define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100 + +/* runtime link query interval */ +#define LIQUIDIO_LINK_QUERY_INTERVAL_MS 1000 + +struct liquidio_if_cfg_context { + int octeon_id; + + wait_queue_head_t wc; + + int cond; +}; + +struct liquidio_if_cfg_resp { + u64 rh; + struct liquidio_if_cfg_info cfg_info; + u64 status; +}; + +struct oct_link_status_resp { + u64 rh; + struct oct_link_info link_info; + u64 status; +}; + +struct oct_timestamp_resp { + u64 rh; + u64 timestamp; + u64 status; +}; + +#define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp)) + +union tx_info { + u64 u64; + struct { +#ifdef __BIG_ENDIAN_BITFIELD + u16 gso_size; + u16 gso_segs; + u32 reserved; +#else + u32 reserved; + u16 gso_segs; + u16 gso_size; +#endif + } s; +}; + +/** Octeon device properties to be used by the NIC module. + * Each octeon device in the system will be represented + * by this structure in the NIC module. + */ + +#define OCTNIC_MAX_SG (MAX_SKB_FRAGS) + +#define OCTNIC_GSO_MAX_HEADER_SIZE 128 +#define OCTNIC_GSO_MAX_SIZE (GSO_MAX_SIZE - OCTNIC_GSO_MAX_HEADER_SIZE) + +/** Structure of a node in list of gather components maintained by + * NIC driver for each network device. + */ +struct octnic_gather { + /** List manipulation. Next and prev pointers. */ + struct list_head list; + + /** Size of the gather component at sg in bytes. */ + int sg_size; + + /** Number of bytes that sg was adjusted to make it 8B-aligned. */ + int adjust; + + /** Gather component that can accommodate max sized fragment list + * received from the IP layer. + */ + struct octeon_sg_entry *sg; +}; + +/** This structure is used by NIC driver to store information required + * to free the sk_buff when the packet has been fetched by Octeon. + * Bytes offset below assume worst-case of a 64-bit system. + */ +struct octnet_buf_free_info { + /** Bytes 1-8. Pointer to network device private structure. */ + struct lio *lio; + + /** Bytes 9-16. Pointer to sk_buff. */ + struct sk_buff *skb; + + /** Bytes 17-24. Pointer to gather list. */ + struct octnic_gather *g; + + /** Bytes 25-32. Physical address of skb->data or gather list. */ + u64 dptr; + + /** Bytes 33-47. Piggybacked soft command, if any */ + struct octeon_soft_command *sc; +}; + +struct handshake { + struct completion init; + struct completion started; + struct pci_dev *pci_dev; + int init_ok; + int started_ok; +}; + +struct octeon_device_priv { + /** Tasklet structures for this device. 
*/ + struct tasklet_struct droq_tasklet; + unsigned long napi_mask; +}; + +static int octeon_device_init(struct octeon_device *); +static void liquidio_remove(struct pci_dev *pdev); +static int liquidio_probe(struct pci_dev *pdev, + const struct pci_device_id *ent); + +static struct handshake handshake[MAX_OCTEON_DEVICES]; +static struct completion first_stage; + +void octeon_droq_bh(unsigned long pdev) +{ + int q_no; + int reschedule = 0; + struct octeon_device *oct = (struct octeon_device *)pdev; + struct octeon_device_priv *oct_priv = + (struct octeon_device_priv *)oct->priv; + + /* for (q_no = 0; q_no < oct->num_oqs; q_no++) { */ + for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES; q_no++) { + if (!(oct->io_qmask.oq & (1UL << q_no))) + continue; + reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no], + MAX_PACKET_BUDGET); + } + + if (reschedule) + tasklet_schedule(&oct_priv->droq_tasklet); +} + +int lio_wait_for_oq_pkts(struct octeon_device *oct) +{ + struct octeon_device_priv *oct_priv = + (struct octeon_device_priv *)oct->priv; + int retry = 100, pkt_cnt = 0, pending_pkts = 0; + int i; + + do { + pending_pkts = 0; + + for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) { + if (!(oct->io_qmask.oq & (1UL << i))) + continue; + pkt_cnt += octeon_droq_check_hw_for_pkts(oct, + oct->droq[i]); + } + if (pkt_cnt > 0) { + pending_pkts += pkt_cnt; + tasklet_schedule(&oct_priv->droq_tasklet); + } + pkt_cnt = 0; + schedule_timeout_uninterruptible(1); + + } while (retry-- && pending_pkts); + + return pkt_cnt; +} + +void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl, + unsigned int bytes_compl) +{ + struct netdev_queue *netdev_queue = txq; + + netdev_tx_completed_queue(netdev_queue, pkts_compl, bytes_compl); +} + +void octeon_update_tx_completion_counters(void *buf, int reqtype, + unsigned int *pkts_compl, + unsigned int *bytes_compl) +{ + struct octnet_buf_free_info *finfo; + struct sk_buff *skb = NULL; + struct octeon_soft_command *sc; + + switch (reqtype) { + case REQTYPE_NORESP_NET: + case REQTYPE_NORESP_NET_SG: + finfo = buf; + skb = finfo->skb; + break; + + case REQTYPE_RESP_NET_SG: + case REQTYPE_RESP_NET: + sc = buf; + skb = sc->callback_arg; + break; + + default: + return; + } + + (*pkts_compl)++; + *bytes_compl += skb->len; +} + +void octeon_report_sent_bytes_to_bql(void *buf, int reqtype) +{ + struct octnet_buf_free_info *finfo; + struct sk_buff *skb; + struct octeon_soft_command *sc; + struct netdev_queue *txq; + + switch (reqtype) { + case REQTYPE_NORESP_NET: + case REQTYPE_NORESP_NET_SG: + finfo = buf; + skb = finfo->skb; + break; + + case REQTYPE_RESP_NET_SG: + case REQTYPE_RESP_NET: + sc = buf; + skb = sc->callback_arg; + break; + + default: + return; + } + + txq = netdev_get_tx_queue(skb->dev, skb_get_queue_mapping(skb)); + netdev_tx_sent_queue(txq, skb->len); +} + +int octeon_console_debug_enabled(u32 console) +{ + return (console_bitmask >> (console)) & 0x1; +} + +/** + * \brief Forces all IO queues off on a given device + * @param oct Pointer to Octeon device + */ +static void force_io_queues_off(struct octeon_device *oct) +{ + if ((oct->chip_id == OCTEON_CN66XX) || + (oct->chip_id == OCTEON_CN68XX)) { + /* Reset the Enable bits for Input Queues. */ + octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0); + + /* Reset the Enable bits for Output Queues. 
*/ + octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0); + } +} + +/** + * \brief wait for all pending requests to complete + * @param oct Pointer to Octeon device + * + * Called during shutdown sequence + */ +static int wait_for_pending_requests(struct octeon_device *oct) +{ + int i, pcount = 0; + + for (i = 0; i < 100; i++) { + pcount = + atomic_read(&oct->response_list + [OCTEON_ORDERED_SC_LIST].pending_req_count); + if (pcount) + schedule_timeout_uninterruptible(HZ / 10); + else + break; + } + + if (pcount) + return 1; + + return 0; +} + +/** + * \brief Cause device to go quiet so it can be safely removed/reset/etc + * @param oct Pointer to Octeon device + */ +static inline void pcierror_quiesce_device(struct octeon_device *oct) +{ + int i; + + /* Disable the input and output queues now. No more packets will + * arrive from Octeon, but we should wait for all packet processing + * to finish. + */ + force_io_queues_off(oct); + + /* To allow for in-flight requests */ + schedule_timeout_uninterruptible(100); + + if (wait_for_pending_requests(oct)) + dev_err(&oct->pci_dev->dev, "There were pending requests\n"); + + /* Force all requests waiting to be fetched by OCTEON to complete. */ + for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) { + struct octeon_instr_queue *iq; + + if (!(oct->io_qmask.iq & (1UL << i))) + continue; + iq = oct->instr_queue[i]; + + if (atomic_read(&iq->instr_pending)) { + spin_lock_bh(&iq->lock); + iq->fill_cnt = 0; + iq->octeon_read_index = iq->host_write_index; + iq->stats.instr_processed += + atomic_read(&iq->instr_pending); + lio_process_iq_request_list(oct, iq); + spin_unlock_bh(&iq->lock); + } + } + + /* Force all pending ordered list requests to time out. */ + lio_process_ordered_list(oct, 1); + + /* We do not need to wait for output queue packets to be processed. */ +} + +/** + * \brief Cleanup PCI AER uncorrectable error status + * @param dev Pointer to PCI device + */ +static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev) +{ + int pos = 0x100; + u32 status, mask; + + pr_info("%s :\n", __func__); + + pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status); + pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask); + if (dev->error_state == pci_channel_io_normal) + status &= ~mask; /* Clear corresponding nonfatal bits */ + else + status &= mask; /* Clear corresponding fatal bits */ + pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status); +} + +/** + * \brief Stop all PCI IO to a given device + * @param dev Pointer to Octeon device + */ +static void stop_pci_io(struct octeon_device *oct) +{ + /* No more instructions will be forwarded. */ + atomic_set(&oct->status, OCT_DEV_IN_RESET); + + pci_disable_device(oct->pci_dev); + + /* Disable interrupts */ + oct->fn_list.disable_interrupt(oct->chip); + + pcierror_quiesce_device(oct); + + /* Release the interrupt line */ + free_irq(oct->pci_dev->irq, oct); + + if (oct->flags & LIO_FLAG_MSI_ENABLED) + pci_disable_msi(oct->pci_dev); + + dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n", + lio_get_state_string(&oct->status)); + + /* cn63xx_cleanup_aer_uncorrect_error_status(oct->pci_dev); */ + /* making it a common function for all OCTEON models */ + cleanup_aer_uncorrect_error_status(oct->pci_dev); +} + +/** + * \brief called when PCI error is detected + * @param pdev Pointer to PCI device + * @param state The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. 
+ */
+static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
+						     pci_channel_state_t state)
+{
+	struct octeon_device *oct = pci_get_drvdata(pdev);
+
+	/* Non-correctable Non-fatal errors */
+	if (state == pci_channel_io_normal) {
+		dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
+		cleanup_aer_uncorrect_error_status(oct->pci_dev);
+		return PCI_ERS_RESULT_CAN_RECOVER;
+	}
+
+	/* Non-correctable Fatal errors */
+	dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
+	stop_pci_io(oct);
+
+	/* Always return a DISCONNECT. There is no support for recovery but only
+	 * for a clean shutdown.
+	 */
+	return PCI_ERS_RESULT_DISCONNECT;
+}
+
+/**
+ * \brief mmio handler
+ * @param pdev Pointer to PCI device
+ */
+static pci_ers_result_t liquidio_pcie_mmio_enabled(struct pci_dev *pdev)
+{
+	/* We should never hit this since we never ask for a reset for a Fatal
+	 * Error. We always return DISCONNECT in io_error above.
+	 * But play safe and return RECOVERED for now.
+	 */
+	return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * \brief called after the pci bus has been reset.
+ * @param pdev Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot. Implementation
+ * resembles the first-half of the octeon_resume routine.
+ */
+static pci_ers_result_t liquidio_pcie_slot_reset(struct pci_dev *pdev)
+{
+	/* We should never hit this since we never ask for a reset for a Fatal
+	 * Error. We always return DISCONNECT in io_error above.
+	 * But play safe and return RECOVERED for now.
+	 */
+	return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * \brief called when traffic can start flowing again.
+ * @param pdev Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation. Implementation resembles the
+ * second-half of the octeon_resume routine.
+ */
+static void liquidio_pcie_resume(struct pci_dev *pdev)
+{
+	/* Nothing to be done here.
*/ +} + +#ifdef CONFIG_PM +/** + * \brief called when suspending + * @param pdev Pointer to PCI device + * @param state state to suspend to + */ +static int liquidio_suspend(struct pci_dev *pdev, pm_message_t state) +{ + return 0; +} + +/** + * \brief called when resuming + * @param pdev Pointer to PCI device + */ +static int liquidio_resume(struct pci_dev *pdev) +{ + return 0; +} +#endif + +/* For PCI-E Advanced Error Recovery (AER) Interface */ +static struct pci_error_handlers liquidio_err_handler = { + .error_detected = liquidio_pcie_error_detected, + .mmio_enabled = liquidio_pcie_mmio_enabled, + .slot_reset = liquidio_pcie_slot_reset, + .resume = liquidio_pcie_resume, +}; + +static const struct pci_device_id liquidio_pci_tbl[] = { + { /* 68xx */ + PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 + }, + { /* 66xx */ + PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 + }, + { + 0, 0, 0, 0, 0, 0, 0 + } +}; +MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl); + +static struct pci_driver liquidio_pci_driver = { + .name = "LiquidIO", + .id_table = liquidio_pci_tbl, + .probe = liquidio_probe, + .remove = liquidio_remove, + .err_handler = &liquidio_err_handler, /* For AER */ + +#ifdef CONFIG_PM + .suspend = liquidio_suspend, + .resume = liquidio_resume, +#endif + +}; + +/** + * \brief register PCI driver + */ +static int liquidio_init_pci(void) +{ + return pci_register_driver(&liquidio_pci_driver); +} + +/** + * \brief unregister PCI driver + */ +static void liquidio_deinit_pci(void) +{ + pci_unregister_driver(&liquidio_pci_driver); +} + +/** + * \brief check interface state + * @param lio per-network private data + * @param state_flag flag state to check + */ +static inline int ifstate_check(struct lio *lio, int state_flag) +{ + return atomic_read(&lio->ifstate) & state_flag; +} + +/** + * \brief set interface state + * @param lio per-network private data + * @param state_flag flag state to set + */ +static inline void ifstate_set(struct lio *lio, int state_flag) +{ + atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) | state_flag)); +} + +/** + * \brief clear interface state + * @param lio per-network private data + * @param state_flag flag state to clear + */ +static inline void ifstate_reset(struct lio *lio, int state_flag) +{ + atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag))); +} + +/** + * \brief Stop Tx queues + * @param netdev network device + */ +static inline void txqs_stop(struct net_device *netdev) +{ + if (netif_is_multiqueue(netdev)) { + int i; + + for (i = 0; i < netdev->num_tx_queues; i++) + netif_stop_subqueue(netdev, i); + } else { + netif_stop_queue(netdev); + } +} + +/** + * \brief Start Tx queues + * @param netdev network device + */ +static inline void txqs_start(struct net_device *netdev) +{ + if (netif_is_multiqueue(netdev)) { + int i; + + for (i = 0; i < netdev->num_tx_queues; i++) + netif_start_subqueue(netdev, i); + } else { + netif_start_queue(netdev); + } +} + +/** + * \brief Wake Tx queues + * @param netdev network device + */ +static inline void txqs_wake(struct net_device *netdev) +{ + if (netif_is_multiqueue(netdev)) { + int i; + + for (i = 0; i < netdev->num_tx_queues; i++) + netif_wake_subqueue(netdev, i); + } else { + netif_wake_queue(netdev); + } +} + +/** + * \brief Stop Tx queue + * @param netdev network device + */ +static void stop_txq(struct net_device *netdev) +{ + txqs_stop(netdev); +} + +/** + * \brief Start Tx queue + * @param netdev network device + */ +static void start_txq(struct net_device *netdev) +{ + 
struct lio *lio = GET_LIO(netdev); + + if (lio->linfo.link.s.status) { + txqs_start(netdev); + return; + } +} + +/** + * \brief Wake a queue + * @param netdev network device + * @param q which queue to wake + */ +static inline void wake_q(struct net_device *netdev, int q) +{ + if (netif_is_multiqueue(netdev)) + netif_wake_subqueue(netdev, q); + else + netif_wake_queue(netdev); +} + +/** + * \brief Stop a queue + * @param netdev network device + * @param q which queue to stop + */ +static inline void stop_q(struct net_device *netdev, int q) +{ + if (netif_is_multiqueue(netdev)) + netif_stop_subqueue(netdev, q); + else + netif_stop_queue(netdev); +} + +/** + * \brief Check Tx queue status, and take appropriate action + * @param lio per-network private data + * @returns 0 if full, number of queues woken up otherwise + */ +static inline int check_txq_status(struct lio *lio) +{ + int ret_val = 0; + + if (netif_is_multiqueue(lio->netdev)) { + int numqs = lio->netdev->num_tx_queues; + int q, iq = 0; + + /* check each sub-queue state */ + for (q = 0; q < numqs; q++) { + iq = lio->linfo.txpciq[q & (lio->linfo.num_txpciq - 1)]; + if (octnet_iq_is_full(lio->oct_dev, iq)) + continue; + wake_q(lio->netdev, q); + ret_val++; + } + } else { + if (octnet_iq_is_full(lio->oct_dev, lio->txq)) + return 0; + wake_q(lio->netdev, lio->txq); + ret_val = 1; + } + return ret_val; +} + +/** + * Remove the node at the head of the list. The list would be empty at + * the end of this call if there are no more nodes in the list. + */ +static inline struct list_head *list_delete_head(struct list_head *root) +{ + struct list_head *node; + + if ((root->prev == root) && (root->next == root)) + node = NULL; + else + node = root->next; + + if (node) + list_del(node); + + return node; +} + +/** + * \brief Delete gather list + * @param lio per-network private data + */ +static void delete_glist(struct lio *lio) +{ + struct octnic_gather *g; + + do { + g = (struct octnic_gather *) + list_delete_head(&lio->glist); + if (g) { + if (g->sg) + kfree((void *)((unsigned long)g->sg - + g->adjust)); + kfree(g); + } + } while (g); +} + +/** + * \brief Setup gather list + * @param lio per-network private data + */ +static int setup_glist(struct lio *lio) +{ + int i; + struct octnic_gather *g; + + INIT_LIST_HEAD(&lio->glist); + + for (i = 0; i < lio->tx_qsize; i++) { + g = kmalloc(sizeof(*g), GFP_KERNEL); + if (!g) + break; + memset(g, 0, sizeof(struct octnic_gather)); + + g->sg_size = + ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE); + + g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL); + if (!g->sg) { + kfree(g); + break; + } + + /* The gather component should be aligned on 64-bit boundary */ + if (((unsigned long)g->sg) & 7) { + g->adjust = 8 - (((unsigned long)g->sg) & 7); + g->sg = (struct octeon_sg_entry *) + ((unsigned long)g->sg + g->adjust); + } + list_add_tail(&g->list, &lio->glist); + } + + if (i == lio->tx_qsize) + return 0; + + delete_glist(lio); + return 1; +} + +/** + * \brief Print link information + * @param netdev network device + */ +static void print_link_info(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + + if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) { + struct oct_link_info *linfo = &lio->linfo; + + if (linfo->link.s.status) { + netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n", + linfo->link.s.speed, + (linfo->link.s.duplex) ? 
"Full" : "Half"); + } else { + netif_info(lio, link, lio->netdev, "Link Down\n"); + } + } +} + +/** + * \brief Update link status + * @param netdev network device + * @param ls link status structure + * + * Called on receipt of a link status response from the core application to + * update each interface's link status. + */ +static inline void update_link_status(struct net_device *netdev, + union oct_link_status *ls) +{ + struct lio *lio = GET_LIO(netdev); + + if ((lio->intf_open) && (lio->linfo.link.u64 != ls->u64)) { + lio->linfo.link.u64 = ls->u64; + + print_link_info(netdev); + + if (lio->linfo.link.s.status) { + netif_carrier_on(netdev); + /* start_txq(netdev); */ + txqs_wake(netdev); + } else { + netif_carrier_off(netdev); + stop_txq(netdev); + } + } +} + +/** + * \brief Droq packet processor sceduler + * @param oct octeon device + */ +static +void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct) +{ + struct octeon_device_priv *oct_priv = + (struct octeon_device_priv *)oct->priv; + u64 oq_no; + struct octeon_droq *droq; + + if (oct->int_status & OCT_DEV_INTR_PKT_DATA) { + for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES; oq_no++) { + if (!(oct->droq_intr & (1 << oq_no))) + continue; + + droq = oct->droq[oq_no]; + + if (droq->ops.poll_mode) { + droq->ops.napi_fn(droq); + oct_priv->napi_mask |= (1 << oq_no); + } else { + tasklet_schedule(&oct_priv->droq_tasklet); + } + } + } +} + +/** + * \brief Interrupt handler for octeon + * @param irq unused + * @param dev octeon device + */ +static +irqreturn_t liquidio_intr_handler(int irq __attribute__((unused)), void *dev) +{ + struct octeon_device *oct = (struct octeon_device *)dev; + irqreturn_t ret; + + /* Disable our interrupts for the duration of ISR */ + oct->fn_list.disable_interrupt(oct->chip); + + ret = oct->fn_list.process_interrupt_regs(oct); + + if (ret == IRQ_HANDLED) + liquidio_schedule_droq_pkt_handlers(oct); + + /* Re-enable our interrupts */ + if (!(atomic_read(&oct->status) == OCT_DEV_IN_RESET)) + oct->fn_list.enable_interrupt(oct->chip); + + return ret; +} + +/** + * \brief Setup interrupt for octeon device + * @param oct octeon device + * + * Enable interrupt in Octeon device as given in the PCI interrupt mask. + */ +static int octeon_setup_interrupt(struct octeon_device *oct) +{ + int irqret, err; + + err = pci_enable_msi(oct->pci_dev); + if (err) + dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n", + err); + else + oct->flags |= LIO_FLAG_MSI_ENABLED; + + irqret = request_irq(oct->pci_dev->irq, liquidio_intr_handler, + IRQF_SHARED, "octeon", oct); + if (irqret) { + if (oct->flags & LIO_FLAG_MSI_ENABLED) + pci_disable_msi(oct->pci_dev); + dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n", + irqret); + return 1; + } + + return 0; +} + +/** + * \brief PCI probe handler + * @param pdev PCI device structure + * @param ent unused + */ +static int liquidio_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct octeon_device *oct_dev = NULL; + struct handshake *hs; + + oct_dev = octeon_allocate_device(pdev->device, + sizeof(struct octeon_device_priv)); + if (!oct_dev) { + dev_err(&pdev->dev, "Unable to allocate device\n"); + return -ENOMEM; + } + + dev_info(&pdev->dev, "Initializing device %x:%x.\n", + (u32)pdev->vendor, (u32)pdev->device); + + /* Assign octeon_device for this device to the private data area. 
+	 */
+	pci_set_drvdata(pdev, oct_dev);
+
+	/* set linux specific device pointer */
+	oct_dev->pci_dev = (void *)pdev;
+
+	hs = &handshake[oct_dev->octeon_id];
+	init_completion(&hs->init);
+	init_completion(&hs->started);
+	hs->pci_dev = pdev;
+
+	if (oct_dev->octeon_id == 0)
+		/* first LiquidIO NIC is detected */
+		complete(&first_stage);
+
+	if (octeon_device_init(oct_dev)) {
+		liquidio_remove(pdev);
+		return -ENOMEM;
+	}
+
+	dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");
+
+	return 0;
+}
+
+/**
+ * \brief Destroy resources associated with octeon device
+ * @param oct octeon device
+ */
+static void octeon_destroy_resources(struct octeon_device *oct)
+{
+	int i;
+	struct octeon_device_priv *oct_priv =
+		(struct octeon_device_priv *)oct->priv;
+
+	struct handshake *hs;
+
+	switch (atomic_read(&oct->status)) {
+	case OCT_DEV_RUNNING:
+	case OCT_DEV_CORE_OK:
+
+		/* No more instructions will be forwarded. */
+		atomic_set(&oct->status, OCT_DEV_IN_RESET);
+
+		oct->app_mode = CVM_DRV_INVALID_APP;
+		dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
+			lio_get_state_string(&oct->status));
+
+		schedule_timeout_uninterruptible(HZ / 10);
+
+		/* fallthrough */
+	case OCT_DEV_HOST_OK:
+
+		/* fallthrough */
+	case OCT_DEV_CONSOLE_INIT_DONE:
+		/* Remove any consoles */
+		octeon_remove_consoles(oct);
+
+		/* fallthrough */
+	case OCT_DEV_IO_QUEUES_DONE:
+		if (wait_for_pending_requests(oct))
+			dev_err(&oct->pci_dev->dev, "There were pending requests\n");
+
+		if (lio_wait_for_instr_fetch(oct))
+			dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
+
+		/* Disable the input and output queues now. No more packets will
+		 * arrive from Octeon, but we should wait for all packet
+		 * processing to finish.
+		 */
+		oct->fn_list.disable_io_queues(oct);
+
+		if (lio_wait_for_oq_pkts(oct))
+			dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");
+
+		/* Disable interrupts */
+		oct->fn_list.disable_interrupt(oct->chip);
+
+		/* Release the interrupt line */
+		free_irq(oct->pci_dev->irq, oct);
+
+		if (oct->flags & LIO_FLAG_MSI_ENABLED)
+			pci_disable_msi(oct->pci_dev);
+
+		/* Soft reset the octeon device before exiting */
+		oct->fn_list.soft_reset(oct);
+
+		/* Disable the device, releasing the PCI INT */
+		pci_disable_device(oct->pci_dev);
+
+		/* fallthrough */
+	case OCT_DEV_IN_RESET:
+	case OCT_DEV_DROQ_INIT_DONE:
+		/*atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE);*/
+		mdelay(100);
+		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
+			if (!(oct->io_qmask.oq & (1UL << i)))
+				continue;
+			octeon_delete_droq(oct, i);
+		}
+
+		/* Force any pending handshakes to complete */
+		for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
+			hs = &handshake[i];
+
+			if (hs->pci_dev) {
+				handshake[oct->octeon_id].init_ok = 0;
+				complete(&handshake[oct->octeon_id].init);
+				handshake[oct->octeon_id].started_ok = 0;
+				complete(&handshake[oct->octeon_id].started);
+			}
+		}
+
+		/* fallthrough */
+	case OCT_DEV_RESP_LIST_INIT_DONE:
+		octeon_delete_response_list(oct);
+
+		/* fallthrough */
+	case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
+		octeon_free_sc_buffer_pool(oct);
+
+		/* fallthrough */
+	case OCT_DEV_INSTR_QUEUE_INIT_DONE:
+		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
+			if (!(oct->io_qmask.iq & (1UL << i)))
+				continue;
+			octeon_delete_instr_queue(oct, i);
+		}
+
+		/* fallthrough */
+	case OCT_DEV_DISPATCH_INIT_DONE:
+		octeon_delete_dispatch_list(oct);
+		cancel_delayed_work_sync(&oct->nic_poll_work.work);
+
+		/* fallthrough */
+	case OCT_DEV_PCI_MAP_DONE:
+		octeon_unmap_pci_barx(oct, 0);
octeon_unmap_pci_barx(oct, 1); + + /* fallthrough */ + case OCT_DEV_BEGIN_STATE: + /* Nothing to be done here either */ + break; + } /* end switch(oct->status) */ + + tasklet_kill(&oct_priv->droq_tasklet); +} + +/** + * \brief Send Rx control command + * @param lio per-network private data + * @param start_stop whether to start or stop + */ +static void send_rx_ctrl_cmd(struct lio *lio, int start_stop) +{ + struct octnic_ctrl_pkt nctrl; + struct octnic_ctrl_params nparams; + + memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); + + nctrl.ncmd.s.cmd = OCTNET_CMD_RX_CTL; + nctrl.ncmd.s.param1 = lio->linfo.ifidx; + nctrl.ncmd.s.param2 = start_stop; + nctrl.netpndev = (u64)lio->netdev; + + nparams.resp_order = OCTEON_RESP_NORESPONSE; + + if (octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams) < 0) + netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n"); +} + +/** + * \brief Destroy NIC device interface + * @param oct octeon device + * @param ifidx which interface to destroy + * + * Cleanup associated with each interface for an Octeon device when NIC + * module is being unloaded or if initialization fails during load. + */ +static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx) +{ + struct net_device *netdev = oct->props[ifidx].netdev; + struct lio *lio; + + if (!netdev) { + dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n", + __func__, ifidx); + return; + } + + lio = GET_LIO(netdev); + + dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n"); + + send_rx_ctrl_cmd(lio, 0); + + if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) + txqs_stop(netdev); + + if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) + unregister_netdev(netdev); + + delete_glist(lio); + + free_netdev(netdev); + + oct->props[ifidx].netdev = NULL; +} + +/** + * \brief Stop complete NIC functionality + * @param oct octeon device + */ +static int liquidio_stop_nic_module(struct octeon_device *oct) +{ + int i, j; + struct lio *lio; + + dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n"); + if (!oct->ifcount) { + dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n"); + return 1; + } + + for (i = 0; i < oct->ifcount; i++) { + lio = GET_LIO(oct->props[i].netdev); + for (j = 0; j < lio->linfo.num_rxpciq; j++) + octeon_unregister_droq_ops(oct, lio->linfo.rxpciq[j]); + } + + for (i = 0; i < oct->ifcount; i++) + liquidio_destroy_nic_device(oct, i); + + dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n"); + return 0; +} + +/** + * \brief Cleans up resources at unload time + * @param pdev PCI device structure + */ +static void liquidio_remove(struct pci_dev *pdev) +{ + struct octeon_device *oct_dev = pci_get_drvdata(pdev); + + dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n"); + + if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP)) + liquidio_stop_nic_module(oct_dev); + + /* Reset the octeon device and cleanup all memory allocated for + * the octeon device by driver. + */ + octeon_destroy_resources(oct_dev); + + dev_info(&oct_dev->pci_dev->dev, "Device removed\n"); + + /* This octeon device has been removed. Update the global + * data structure to reflect this. Free the device structure. 
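+	 */
+	octeon_free_device_mem(oct_dev);
+}
+
+/* Editorial sketch (comment only, not functional code):
+ * octeon_destroy_resources() above unwinds initialization in reverse by
+ * falling through the status switch, roughly:
+ *
+ *	OCT_DEV_RUNNING/CORE_OK   -> mark device IN_RESET, settle
+ *	OCT_DEV_CONSOLE_INIT_DONE -> remove consoles
+ *	OCT_DEV_IO_QUEUES_DONE    -> drain IQs/OQs, free IRQ, soft reset
+ *	OCT_DEV_DROQ_INIT_DONE    -> delete DROQs, complete handshakes
+ *	OCT_DEV_RESP_LIST_INIT_DONE .. OCT_DEV_DISPATCH_INIT_DONE
+ *				  -> free response list, sc pool, IQs,
+ *				     dispatch list
+ *	OCT_DEV_PCI_MAP_DONE      -> unmap BAR0/BAR1
+ */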
+/**
+ * \brief Identify the Octeon device and map the BAR address space
+ * @param oct octeon device
+ */
+static int octeon_chip_specific_setup(struct octeon_device *oct)
+{
+	u32 dev_id, rev_id;
+	int ret = 1;
+
+	pci_read_config_dword(oct->pci_dev, 0, &dev_id);
+	pci_read_config_dword(oct->pci_dev, 8, &rev_id);
+	oct->rev_id = rev_id & 0xff;
+
+	switch (dev_id) {
+	case OCTEON_CN68XX_PCIID:
+		oct->chip_id = OCTEON_CN68XX;
+		ret = lio_setup_cn68xx_octeon_device(oct);
+		break;
+
+	case OCTEON_CN66XX_PCIID:
+		oct->chip_id = OCTEON_CN66XX;
+		ret = lio_setup_cn66xx_octeon_device(oct);
+		break;
+	default:
+		dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
+			dev_id);
+	}
+
+	if (!ret)
+		dev_info(&oct->pci_dev->dev, "%s PASS%d.%d\n",
+			 octeon_get_conf(oct)->card_name,
+			 OCTEON_MAJOR_REV(oct),
+			 OCTEON_MINOR_REV(oct));
+
+	return ret;
+}
+
+/**
+ * \brief PCI initialization for each Octeon device.
+ * @param oct octeon device
+ */
+static int octeon_pci_os_setup(struct octeon_device *oct)
+{
+	/* setup PCI stuff first */
+	if (pci_enable_device(oct->pci_dev)) {
+		dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
+		return 1;
+	}
+
+	if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
+		dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
+		return 1;
+	}
+
+	/* Enable PCI DMA Master. */
+	pci_set_master(oct->pci_dev);
+
+	return 0;
+}
+
+/**
+ * \brief Check Tx queue state for a given network buffer
+ * @param lio per-network private data
+ * @param skb network buffer
+ */
+static inline int check_txq_state(struct lio *lio, struct sk_buff *skb)
+{
+	int q = 0, iq = 0;
+
+	if (netif_is_multiqueue(lio->netdev)) {
+		q = skb->queue_mapping;
+		iq = lio->linfo.txpciq[(q & (lio->linfo.num_txpciq - 1))];
+	} else {
+		iq = lio->txq;
+	}
+
+	if (octnet_iq_is_full(lio->oct_dev, iq))
+		return 0;
+	wake_q(lio->netdev, q);
+	return 1;
+}
+
+/**
+ * \brief Unmap and free network buffer
+ * @param buf buffer
+ */
+static void free_netbuf(void *buf)
+{
+	struct sk_buff *skb;
+	struct octnet_buf_free_info *finfo;
+	struct lio *lio;
+
+	finfo = (struct octnet_buf_free_info *)buf;
+	skb = finfo->skb;
+	lio = finfo->lio;
+
+	dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
+			 DMA_TO_DEVICE);
+
+	check_txq_state(lio, skb);
+
+	recv_buffer_free((struct sk_buff *)skb);
+}
+
+/**
+ * \brief Unmap and free gather buffer
+ * @param buf buffer
+ */
+static void free_netsgbuf(void *buf)
+{
+	struct octnet_buf_free_info *finfo;
+	struct sk_buff *skb;
+	struct lio *lio;
+	struct octnic_gather *g;
+	int i, frags;
+
+	finfo = (struct octnet_buf_free_info *)buf;
+	skb = finfo->skb;
+	lio = finfo->lio;
+	g = finfo->g;
+	frags = skb_shinfo(skb)->nr_frags;
+
+	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
+			 g->sg[0].ptr[0], (skb->len - skb->data_len),
+			 DMA_TO_DEVICE);
+
+	i = 1;
+	while (frags--) {
+		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
+
+		pci_unmap_page((lio->oct_dev)->pci_dev,
+			       g->sg[(i >> 2)].ptr[(i & 3)],
+			       frag->size, DMA_TO_DEVICE);
+		i++;
+	}
+
+	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
+			 finfo->dptr, g->sg_size,
+			 DMA_TO_DEVICE);
+
+	spin_lock(&lio->lock);
+	list_add_tail(&g->list, &lio->glist);
+	spin_unlock(&lio->lock);
+
+	check_txq_state(lio, skb); /* mq support: sub-queue state check */
+
+	recv_buffer_free((struct sk_buff *)skb);
+}
+
+/**
+ * \brief Unmap and free gather buffer with response
+ * @param buf buffer
+ */
+static void
free_netsgbuf_with_resp(void *buf) +{ + struct octeon_soft_command *sc; + struct octnet_buf_free_info *finfo; + struct sk_buff *skb; + struct lio *lio; + struct octnic_gather *g; + int i, frags; + + sc = (struct octeon_soft_command *)buf; + skb = (struct sk_buff *)sc->callback_arg; + finfo = (struct octnet_buf_free_info *)&skb->cb; + + lio = finfo->lio; + g = finfo->g; + frags = skb_shinfo(skb)->nr_frags; + + dma_unmap_single(&lio->oct_dev->pci_dev->dev, + g->sg[0].ptr[0], (skb->len - skb->data_len), + DMA_TO_DEVICE); + + i = 1; + while (frags--) { + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1]; + + pci_unmap_page((lio->oct_dev)->pci_dev, + g->sg[(i >> 2)].ptr[(i & 3)], + frag->size, DMA_TO_DEVICE); + i++; + } + + dma_unmap_single(&lio->oct_dev->pci_dev->dev, + finfo->dptr, g->sg_size, + DMA_TO_DEVICE); + + spin_lock(&lio->lock); + list_add_tail(&g->list, &lio->glist); + spin_unlock(&lio->lock); + + /* Don't free the skb yet */ + + check_txq_state(lio, skb); +} + +/** + * \brief Adjust ptp frequency + * @param ptp PTP clock info + * @param ppb how much to adjust by, in parts-per-billion + */ +static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) +{ + struct lio *lio = container_of(ptp, struct lio, ptp_info); + struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; + u64 comp, delta; + unsigned long flags; + bool neg_adj = false; + + if (ppb < 0) { + neg_adj = true; + ppb = -ppb; + } + + /* The hardware adds the clock compensation value to the + * PTP clock on every coprocessor clock cycle, so we + * compute the delta in terms of coprocessor clocks. + */ + delta = (u64)ppb << 32; + do_div(delta, oct->coproc_clock_rate); + + spin_lock_irqsave(&lio->ptp_lock, flags); + comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP); + if (neg_adj) + comp -= delta; + else + comp += delta; + lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP); + spin_unlock_irqrestore(&lio->ptp_lock, flags); + + return 0; +} + +/** + * \brief Adjust ptp time + * @param ptp PTP clock info + * @param delta how much to adjust by, in nanosecs + */ +static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + unsigned long flags; + struct lio *lio = container_of(ptp, struct lio, ptp_info); + + spin_lock_irqsave(&lio->ptp_lock, flags); + lio->ptp_adjust += delta; + spin_unlock_irqrestore(&lio->ptp_lock, flags); + + return 0; +} + +/** + * \brief Get hardware clock time, including any adjustment + * @param ptp PTP clock info + * @param ts timespec + */ +static int liquidio_ptp_gettime(struct ptp_clock_info *ptp, + struct timespec64 *ts) +{ + u64 ns; + u32 remainder; + unsigned long flags; + struct lio *lio = container_of(ptp, struct lio, ptp_info); + struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; + + spin_lock_irqsave(&lio->ptp_lock, flags); + ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI); + ns += lio->ptp_adjust; + spin_unlock_irqrestore(&lio->ptp_lock, flags); + + ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder); + ts->tv_nsec = remainder; + + return 0; +} + +/** + * \brief Set hardware clock time. 
Reset adjustment + * @param ptp PTP clock info + * @param ts timespec + */ +static int liquidio_ptp_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + u64 ns; + unsigned long flags; + struct lio *lio = container_of(ptp, struct lio, ptp_info); + struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; + + ns = timespec_to_ns(ts); + + spin_lock_irqsave(&lio->ptp_lock, flags); + lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI); + lio->ptp_adjust = 0; + spin_unlock_irqrestore(&lio->ptp_lock, flags); + + return 0; +} + +/** + * \brief Check if PTP is enabled + * @param ptp PTP clock info + * @param rq request + * @param on is it on + */ +static int liquidio_ptp_enable(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, int on) +{ + return -EOPNOTSUPP; +} + +/** + * \brief Open PTP clock source + * @param netdev network device + */ +static void oct_ptp_open(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; + + spin_lock_init(&lio->ptp_lock); + + snprintf(lio->ptp_info.name, 16, "%s", netdev->name); + lio->ptp_info.owner = THIS_MODULE; + lio->ptp_info.max_adj = 250000000; + lio->ptp_info.n_alarm = 0; + lio->ptp_info.n_ext_ts = 0; + lio->ptp_info.n_per_out = 0; + lio->ptp_info.pps = 0; + lio->ptp_info.adjfreq = liquidio_ptp_adjfreq; + lio->ptp_info.adjtime = liquidio_ptp_adjtime; + lio->ptp_info.gettime64 = liquidio_ptp_gettime; + lio->ptp_info.settime64 = liquidio_ptp_settime; + lio->ptp_info.enable = liquidio_ptp_enable; + + lio->ptp_adjust = 0; + + lio->ptp_clock = ptp_clock_register(&lio->ptp_info, + &oct->pci_dev->dev); + + if (IS_ERR(lio->ptp_clock)) + lio->ptp_clock = NULL; +} + +/** + * \brief Init PTP clock + * @param oct octeon device + */ +static void liquidio_ptp_init(struct octeon_device *oct) +{ + u64 clock_comp, cfg; + + clock_comp = (u64)NSEC_PER_SEC << 32; + do_div(clock_comp, oct->coproc_clock_rate); + lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP); + + /* Enable */ + cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG); + lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG); +} + +/** + * \brief Load firmware to device + * @param oct octeon device + * + * Maps device to firmware filename, requests firmware, and downloads it + */ +static int load_firmware(struct octeon_device *oct) +{ + int ret = 0; + const struct firmware *fw; + char fw_name[LIO_MAX_FW_FILENAME_LEN]; + char *tmp_fw_type; + + if (strncmp(fw_type, LIO_FW_NAME_TYPE_NONE, + sizeof(LIO_FW_NAME_TYPE_NONE)) == 0) { + dev_info(&oct->pci_dev->dev, "Skipping firmware load\n"); + return ret; + } + + if (fw_type[0] == '\0') + tmp_fw_type = LIO_FW_NAME_TYPE_NIC; + else + tmp_fw_type = fw_type; + + sprintf(fw_name, "%s%s%s_%s%s", LIO_FW_DIR, LIO_FW_BASE_NAME, + octeon_get_conf(oct)->card_name, tmp_fw_type, + LIO_FW_NAME_SUFFIX); + + ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev); + if (ret) { + dev_err(&oct->pci_dev->dev, "Request firmware failed. 
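Could not find file %s.\n",
+			fw_name);
+		return ret;
+	}
+
+	ret = octeon_download_firmware(oct, fw->data, fw->size);
+
+	release_firmware(fw);
+
+	return ret;
+}
+
+/* Example (illustrative; the exact macro values are defined elsewhere in
+ * this patch): the firmware file name above is composed as
+ *
+ *	LIO_FW_DIR LIO_FW_BASE_NAME "<card_name>_" fw_type LIO_FW_NAME_SUFFIX
+ *
+ * so, assuming a hypothetical card named "foo" and the default NIC
+ * firmware type, request_firmware() would be asked for something like
+ * "<fw-dir>/<base>foo_nic<suffix>".
+ */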
+/**
+ * \brief Setup output queue
+ * @param oct octeon device
+ * @param q_no which queue
+ * @param num_descs how many descriptors
+ * @param desc_size size of each descriptor
+ * @param app_ctx application context
+ */
+static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
+			     int desc_size, void *app_ctx)
+{
+	int ret_val = 0;
+
+	dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no);
+	/* droq creation and local register settings. */
+	ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
+	if (ret_val == -1)
+		return ret_val;
+
+	if (ret_val == 1) {
+		dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no);
+		return 0;
+	}
+	/* tasklet creation for the droq */
+
+	/* Enable the droq queues */
+	octeon_set_droq_pkt_op(oct, q_no, 1);
+
+	/* Send Credit for Octeon Output queues. Credits are always
+	 * sent after the output queue is enabled.
+	 */
+	writel(oct->droq[q_no]->max_count,
+	       oct->droq[q_no]->pkts_credit_reg);
+
+	return ret_val;
+}
+
+/**
+ * \brief Callback for getting interface configuration
+ * @param oct octeon device
+ * @param status status of request
+ * @param buf pointer to resp structure
+ */
+static void if_cfg_callback(struct octeon_device *oct,
+			    u32 status,
+			    void *buf)
+{
+	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
+	struct liquidio_if_cfg_resp *resp;
+	struct liquidio_if_cfg_context *ctx;
+
+	resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
+	ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
+
+	oct = lio_get_device(ctx->octeon_id);
+	if (resp->status)
+		dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n",
+			CVM_CAST64(resp->status));
+	ACCESS_ONCE(ctx->cond) = 1;
+
+	/* This barrier is required to be sure that the response has been
+	 * written fully before waking up the handler
+	 */
+	wmb();
+
+	wake_up_interruptible(&ctx->wc);
+}
+
+/**
+ * \brief Select queue based on hash
+ * @param dev Net device
+ * @param skb sk_buff structure
+ * @returns selected queue number
+ */
+static u16 select_q(struct net_device *dev, struct sk_buff *skb,
+		    void *accel_priv, select_queue_fallback_t fallback)
+{
+	int qindex;
+	struct lio *lio;
+
+	lio = GET_LIO(dev);
+	/* select queue on chosen queue_mapping or core */
+	qindex = skb_rx_queue_recorded(skb) ?
+		 skb_get_rx_queue(skb) : smp_processor_id();
+	return (u16)(qindex & (lio->linfo.num_txpciq - 1));
+}
+
+/** Routine to push packets arriving on Octeon interface up to network layer.
+ * @param octeon_id - octeon device id.
+ * @param skbuff - skbuff struct to be passed to network layer.
+ * @param len - size of total data received.
+ * @param rh - Control header associated with the packet
+ * @param param - additional control data with the packet
+ */
+static void
+liquidio_push_packet(u32 octeon_id,
+		     void *skbuff,
+		     u32 len,
+		     union octeon_rh *rh,
+		     void *param)
+{
+	struct napi_struct *napi = param;
+	struct octeon_device *oct = lio_get_device(octeon_id);
+	struct sk_buff *skb = (struct sk_buff *)skbuff;
+	struct skb_shared_hwtstamps *shhwtstamps;
+	u64 ns;
+	struct net_device *netdev =
+		(struct net_device *)oct->props[rh->r_dh.link].netdev;
+	struct octeon_droq *droq = container_of(param, struct octeon_droq,
+						napi);
+	if (netdev) {
+		int packet_was_received;
+		struct lio *lio = GET_LIO(netdev);
+
+		/* Do not proceed if the interface is not in RUNNING state.
*/ + if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) { + recv_buffer_free(skb); + droq->stats.rx_dropped++; + return; + } + + skb->dev = netdev; + + if (rh->r_dh.has_hwtstamp) { + /* timestamp is included from the hardware at the + * beginning of the packet. + */ + if (ifstate_check(lio, + LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) { + /* Nanoseconds are in the first 64-bits + * of the packet. + */ + memcpy(&ns, (skb->data), sizeof(ns)); + shhwtstamps = skb_hwtstamps(skb); + shhwtstamps->hwtstamp = + ns_to_ktime(ns + lio->ptp_adjust); + } + skb_pull(skb, sizeof(ns)); + } + + skb->protocol = eth_type_trans(skb, skb->dev); + + if ((netdev->features & NETIF_F_RXCSUM) && + (rh->r_dh.csum_verified == CNNIC_CSUM_VERIFIED)) + /* checksum has already been verified */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb->ip_summed = CHECKSUM_NONE; + + packet_was_received = napi_gro_receive(napi, skb) != GRO_DROP; + + if (packet_was_received) { + droq->stats.rx_bytes_received += len; + droq->stats.rx_pkts_received++; + netdev->last_rx = jiffies; + } else { + droq->stats.rx_dropped++; + netif_info(lio, rx_err, lio->netdev, + "droq:%d error rx_dropped:%llu\n", + droq->q_no, droq->stats.rx_dropped); + } + + } else { + recv_buffer_free(skb); + } +} + +/** + * \brief wrapper for calling napi_schedule + * @param param parameters to pass to napi_schedule + * + * Used when scheduling on different CPUs + */ +static void napi_schedule_wrapper(void *param) +{ + struct napi_struct *napi = param; + + napi_schedule(napi); +} + +/** + * \brief callback when receive interrupt occurs and we are in NAPI mode + * @param arg pointer to octeon output queue + */ +static void liquidio_napi_drv_callback(void *arg) +{ + struct octeon_droq *droq = arg; + int this_cpu = smp_processor_id(); + + if (droq->cpu_id == this_cpu) { + napi_schedule(&droq->napi); + } else { + struct call_single_data *csd = &droq->csd; + + csd->func = napi_schedule_wrapper; + csd->info = &droq->napi; + csd->flags = 0; + + smp_call_function_single_async(droq->cpu_id, csd); + } +} + +/** + * \brief Main NAPI poll function + * @param droq octeon output queue + * @param budget maximum number of items to process + */ +static int liquidio_napi_do_rx(struct octeon_droq *droq, int budget) +{ + int work_done; + struct lio *lio = GET_LIO(droq->napi.dev); + struct octeon_device *oct = lio->oct_dev; + + work_done = octeon_process_droq_poll_cmd(oct, droq->q_no, + POLL_EVENT_PROCESS_PKTS, + budget); + if (work_done < 0) { + netif_info(lio, rx_err, lio->netdev, + "Receive work_done < 0, rxq:%d\n", droq->q_no); + goto octnet_napi_finish; + } + + if (work_done > budget) + dev_err(&oct->pci_dev->dev, ">>>> %s work_done: %d budget: %d\n", + __func__, work_done, budget); + + return work_done; + +octnet_napi_finish: + napi_complete(&droq->napi); + octeon_process_droq_poll_cmd(oct, droq->q_no, POLL_EVENT_ENABLE_INTR, + 0); + return 0; +} + +/** + * \brief Entry point for NAPI polling + * @param napi NAPI structure + * @param budget maximum number of items to process + */ +static int liquidio_napi_poll(struct napi_struct *napi, int budget) +{ + struct octeon_droq *droq; + int work_done; + + droq = container_of(napi, struct octeon_droq, napi); + + work_done = liquidio_napi_do_rx(droq, budget); + + if (work_done < budget) { + napi_complete(napi); + octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no, + POLL_EVENT_ENABLE_INTR, 0); + return 0; + } + + return work_done; +} + +/** + * \brief Setup input and output queues + * @param octeon_dev octeon device + * @param net_device Net 
device + * + * Note: Queues are with respect to the octeon device. Thus + * an input queue is for egress packets, and output queues + * are for ingress packets. + */ +static inline int setup_io_queues(struct octeon_device *octeon_dev, + struct net_device *net_device) +{ + static int first_time = 1; + static struct octeon_droq_ops droq_ops; + static int cpu_id; + static int cpu_id_modulus; + struct octeon_droq *droq; + struct napi_struct *napi; + int q, q_no, retval = 0; + struct lio *lio; + int num_tx_descs; + + lio = GET_LIO(net_device); + if (first_time) { + first_time = 0; + memset(&droq_ops, 0, sizeof(struct octeon_droq_ops)); + + droq_ops.fptr = liquidio_push_packet; + + droq_ops.poll_mode = 1; + droq_ops.napi_fn = liquidio_napi_drv_callback; + cpu_id = 0; + cpu_id_modulus = num_present_cpus(); + } + + /* set up DROQs. */ + for (q = 0; q < lio->linfo.num_rxpciq; q++) { + q_no = lio->linfo.rxpciq[q]; + + retval = octeon_setup_droq(octeon_dev, q_no, + CFG_GET_NUM_RX_DESCS_NIC_IF + (octeon_get_conf(octeon_dev), + lio->ifidx), + CFG_GET_NUM_RX_BUF_SIZE_NIC_IF + (octeon_get_conf(octeon_dev), + lio->ifidx), NULL); + if (retval) { + dev_err(&octeon_dev->pci_dev->dev, + " %s : Runtime DROQ(RxQ) creation failed.\n", + __func__); + return 1; + } + + droq = octeon_dev->droq[q_no]; + napi = &droq->napi; + netif_napi_add(net_device, napi, liquidio_napi_poll, 64); + + /* designate a CPU for this droq */ + droq->cpu_id = cpu_id; + cpu_id++; + if (cpu_id >= cpu_id_modulus) + cpu_id = 0; + + octeon_register_droq_ops(octeon_dev, q_no, &droq_ops); + } + + /* set up IQs. */ + for (q = 0; q < lio->linfo.num_txpciq; q++) { + num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(octeon_get_conf + (octeon_dev), + lio->ifidx); + retval = octeon_setup_iq(octeon_dev, lio->linfo.txpciq[q], + num_tx_descs, + netdev_get_tx_queue(net_device, q)); + if (retval) { + dev_err(&octeon_dev->pci_dev->dev, + " %s : Runtime IQ(TxQ) creation failed.\n", + __func__); + return 1; + } + } + + return 0; +} + +/** + * \brief Poll routine for checking transmit queue status + * @param work work_struct data structure + */ +static void octnet_poll_check_txq_status(struct work_struct *work) +{ + struct cavium_wk *wk = (struct cavium_wk *)work; + struct lio *lio = (struct lio *)wk->ctxptr; + + if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) + return; + + check_txq_status(lio); + queue_delayed_work(lio->txq_status_wq.wq, + &lio->txq_status_wq.wk.work, msecs_to_jiffies(1)); +} + +/** + * \brief Sets up the txq poll check + * @param netdev network device + */ +static inline void setup_tx_poll_fn(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + + lio->txq_status_wq.wq = create_workqueue("txq-status"); + if (!lio->txq_status_wq.wq) { + dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n"); + return; + } + INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work, + octnet_poll_check_txq_status); + lio->txq_status_wq.wk.ctxptr = lio; + queue_delayed_work(lio->txq_status_wq.wq, + &lio->txq_status_wq.wk.work, msecs_to_jiffies(1)); +} + +/** + * \brief Net device open for LiquidIO + * @param netdev network device + */ +static int liquidio_open(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct napi_struct *napi, *n; + + list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) + napi_enable(napi); + + oct_ptp_open(netdev); + + ifstate_set(lio, LIO_IFSTATE_RUNNING); + setup_tx_poll_fn(netdev); + start_txq(netdev); + 
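+	/* Editorial note: the txq-status worker armed by setup_tx_poll_fn()
+	 * above re-queues itself every millisecond (msecs_to_jiffies(1)) and
+	 * wakes any stopped sub-queue whose instruction queue is no longer
+	 * full; see octnet_poll_check_txq_status() and check_txq_status().
+	 */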
+ netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n"); + try_module_get(THIS_MODULE); + + /* tell Octeon to start forwarding packets to host */ + send_rx_ctrl_cmd(lio, 1); + + /* Ready for link status updates */ + lio->intf_open = 1; + + dev_info(&oct->pci_dev->dev, "%s interface is opened\n", + netdev->name); + + return 0; +} + +/** + * \brief Net device stop for LiquidIO + * @param netdev network device + */ +static int liquidio_stop(struct net_device *netdev) +{ + struct napi_struct *napi, *n; + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + + netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n"); + /* Inform that netif carrier is down */ + lio->intf_open = 0; + lio->linfo.link.s.status = 0; + + netif_carrier_off(netdev); + + /* tell Octeon to stop forwarding packets to host */ + send_rx_ctrl_cmd(lio, 0); + + cancel_delayed_work_sync(&lio->txq_status_wq.wk.work); + flush_workqueue(lio->txq_status_wq.wq); + destroy_workqueue(lio->txq_status_wq.wq); + + if (lio->ptp_clock) { + ptp_clock_unregister(lio->ptp_clock); + lio->ptp_clock = NULL; + } + + ifstate_reset(lio, LIO_IFSTATE_RUNNING); + + /* This is a hack that allows DHCP to continue working. */ + set_bit(__LINK_STATE_START, &lio->netdev->state); + + list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) + napi_disable(napi); + + txqs_stop(netdev); + + dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name); + module_put(THIS_MODULE); + + return 0; +} + +void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr) +{ + struct octnic_ctrl_pkt *nctrl = (struct octnic_ctrl_pkt *)nctrl_ptr; + struct net_device *netdev = (struct net_device *)nctrl->netpndev; + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + + switch (nctrl->ncmd.s.cmd) { + case OCTNET_CMD_CHANGE_DEVFLAGS: + case OCTNET_CMD_SET_MULTI_LIST: + break; + + case OCTNET_CMD_CHANGE_MACADDR: + /* If command is successful, change the MACADDR. */ + netif_info(lio, probe, lio->netdev, " MACAddr changed to 0x%llx\n", + CVM_CAST64(nctrl->udd[0])); + dev_info(&oct->pci_dev->dev, "%s MACAddr changed to 0x%llx\n", + netdev->name, CVM_CAST64(nctrl->udd[0])); + memcpy(netdev->dev_addr, ((u8 *)&nctrl->udd[0]) + 2, ETH_ALEN); + break; + + case OCTNET_CMD_CHANGE_MTU: + /* If command is successful, change the MTU. 
*/
+		netif_info(lio, probe, lio->netdev, " MTU Changed from %d to %d\n",
+			   netdev->mtu, nctrl->ncmd.s.param2);
+		dev_info(&oct->pci_dev->dev, "%s MTU Changed from %d to %d\n",
+			 netdev->name, netdev->mtu,
+			 nctrl->ncmd.s.param2);
+		netdev->mtu = nctrl->ncmd.s.param2;
+		break;
+
+	case OCTNET_CMD_GPIO_ACCESS:
+		netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");
+
+		break;
+
+	case OCTNET_CMD_LRO_ENABLE:
+		dev_info(&oct->pci_dev->dev, "%s LRO Enabled\n", netdev->name);
+		break;
+
+	case OCTNET_CMD_LRO_DISABLE:
+		dev_info(&oct->pci_dev->dev, "%s LRO Disabled\n",
+			 netdev->name);
+		break;
+
+	case OCTNET_CMD_VERBOSE_ENABLE:
+		dev_info(&oct->pci_dev->dev, "%s Verbose mode enabled\n",
+			 netdev->name);
+		break;
+
+	case OCTNET_CMD_VERBOSE_DISABLE:
+		dev_info(&oct->pci_dev->dev, "%s Verbose mode disabled\n",
+			 netdev->name);
+		break;
+
+	case OCTNET_CMD_SET_SETTINGS:
+		dev_info(&oct->pci_dev->dev, "%s settings changed\n",
+			 netdev->name);
+
+		break;
+
+	default:
+		dev_err(&oct->pci_dev->dev, "%s Unknown cmd %d\n", __func__,
+			nctrl->ncmd.s.cmd);
+	}
+}
+
+/**
+ * \brief Converts a mask based on net device flags
+ * @param netdev network device
+ *
+ * This routine generates an octnet_ifflags mask from the net device flags
+ * received from the OS.
+ */
+static inline enum octnet_ifflags get_new_flags(struct net_device *netdev)
+{
+	enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;
+
+	if (netdev->flags & IFF_PROMISC)
+		f |= OCTNET_IFFLAG_PROMISC;
+
+	if (netdev->flags & IFF_ALLMULTI)
+		f |= OCTNET_IFFLAG_ALLMULTI;
+
+	if (netdev->flags & IFF_MULTICAST) {
+		f |= OCTNET_IFFLAG_MULTICAST;
+
+		/* Accept all multicast addresses if there are more than we
+		 * can handle
+		 */
+		if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
+			f |= OCTNET_IFFLAG_ALLMULTI;
+	}
+
+	if (netdev->flags & IFF_BROADCAST)
+		f |= OCTNET_IFFLAG_BROADCAST;
+
+	return f;
+}
+
+/**
+ * \brief Net device set_multicast_list
+ * @param netdev network device
+ */
+static void liquidio_set_mcast_list(struct net_device *netdev)
+{
+	struct lio *lio = GET_LIO(netdev);
+	struct octeon_device *oct = lio->oct_dev;
+	struct octnic_ctrl_pkt nctrl;
+	struct octnic_ctrl_params nparams;
+	struct netdev_hw_addr *ha;
+	u64 *mc;
+	int ret, i;
+	int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);
+
+	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+	/* Create a ctrl pkt command to be sent to core app. */
+	nctrl.ncmd.u64 = 0;
+	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
+	nctrl.ncmd.s.param1 = lio->linfo.ifidx;
+	nctrl.ncmd.s.param2 = get_new_flags(netdev);
+	nctrl.ncmd.s.param3 = mc_count;
+	nctrl.ncmd.s.more = mc_count;
+	nctrl.netpndev = (u64)netdev;
+	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+	/* copy all the addresses into the udd */
+	i = 0;
+	mc = &nctrl.udd[0];
+	netdev_for_each_mc_addr(ha, netdev) {
+		*mc = 0;
+		memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN);
+		/* no need to swap bytes */
+
+		if (++mc >= &nctrl.udd[mc_count])
+			break;
+	}
+
+	/* Apparently, any activity in this call from the kernel has to
+	 * be atomic. So we won't wait for response.
+ */ + nctrl.wait_time = 0; + + nparams.resp_order = OCTEON_RESP_NORESPONSE; + + ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams); + if (ret < 0) { + dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n", + ret); + } +} + +/** + * \brief Net device set_mac_address + * @param netdev network device + */ +static int liquidio_set_mac(struct net_device *netdev, void *p) +{ + int ret = 0; + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct sockaddr *addr = (struct sockaddr *)p; + struct octnic_ctrl_pkt nctrl; + struct octnic_ctrl_params nparams; + + if ((!is_valid_ether_addr(addr->sa_data)) || + (ifstate_check(lio, LIO_IFSTATE_RUNNING))) + return -EADDRNOTAVAIL; + + memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); + + nctrl.ncmd.u64 = 0; + nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR; + nctrl.ncmd.s.param1 = lio->linfo.ifidx; + nctrl.ncmd.s.param2 = 0; + nctrl.ncmd.s.more = 1; + nctrl.netpndev = (u64)netdev; + nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; + nctrl.wait_time = 100; + + nctrl.udd[0] = 0; + /* The MAC Address is presented in network byte order. */ + memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN); + + nparams.resp_order = OCTEON_RESP_ORDERED; + + ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams); + if (ret < 0) { + dev_err(&oct->pci_dev->dev, "MAC Address change failed\n"); + return -ENOMEM; + } + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN); + + return 0; +} + +/** + * \brief Net device get_stats + * @param netdev network device + */ +static struct net_device_stats *liquidio_get_stats(struct net_device *netdev) +{ + struct lio *lio = GET_LIO(netdev); + struct net_device_stats *stats = &netdev->stats; + struct octeon_device *oct; + u64 pkts = 0, drop = 0, bytes = 0; + struct oct_droq_stats *oq_stats; + struct oct_iq_stats *iq_stats; + int i, iq_no, oq_no; + + oct = lio->oct_dev; + + for (i = 0; i < lio->linfo.num_txpciq; i++) { + iq_no = lio->linfo.txpciq[i]; + iq_stats = &oct->instr_queue[iq_no]->stats; + pkts += iq_stats->tx_done; + drop += iq_stats->tx_dropped; + bytes += iq_stats->tx_tot_bytes; + } + + stats->tx_packets = pkts; + stats->tx_bytes = bytes; + stats->tx_dropped = drop; + + pkts = 0; + drop = 0; + bytes = 0; + + for (i = 0; i < lio->linfo.num_rxpciq; i++) { + oq_no = lio->linfo.rxpciq[i]; + oq_stats = &oct->droq[oq_no]->stats; + pkts += oq_stats->rx_pkts_received; + drop += (oq_stats->rx_dropped + + oq_stats->dropped_nodispatch + + oq_stats->dropped_toomany + + oq_stats->dropped_nomem); + bytes += oq_stats->rx_bytes_received; + } + + stats->rx_bytes = bytes; + stats->rx_packets = pkts; + stats->rx_dropped = drop; + + return stats; +} + +/** + * \brief Net device change_mtu + * @param netdev network device + */ +static int liquidio_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octnic_ctrl_pkt nctrl; + struct octnic_ctrl_params nparams; + int max_frm_size = new_mtu + OCTNET_FRM_HEADER_SIZE; + int ret = 0; + + /* Limit the MTU to make sure the ethernet packets are between 64 bytes + * and 65535 bytes + */ + if ((max_frm_size < OCTNET_MIN_FRM_SIZE) || + (max_frm_size > OCTNET_MAX_FRM_SIZE)) { + dev_err(&oct->pci_dev->dev, "Invalid MTU: %d\n", new_mtu); + dev_err(&oct->pci_dev->dev, "Valid range %d and %d\n", + (OCTNET_MIN_FRM_SIZE - OCTNET_FRM_HEADER_SIZE), + (OCTNET_MAX_FRM_SIZE - 
OCTNET_FRM_HEADER_SIZE)); + return -EINVAL; + } + + memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); + + nctrl.ncmd.u64 = 0; + nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MTU; + nctrl.ncmd.s.param1 = lio->linfo.ifidx; + nctrl.ncmd.s.param2 = new_mtu; + nctrl.wait_time = 100; + nctrl.netpndev = (u64)netdev; + nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; + + nparams.resp_order = OCTEON_RESP_ORDERED; + + ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams); + if (ret < 0) { + dev_err(&oct->pci_dev->dev, "Failed to set MTU\n"); + return -1; + } + + lio->mtu = new_mtu; + + return 0; +} + +/** + * \brief Handler for SIOCSHWTSTAMP ioctl + * @param netdev network device + * @param ifr interface request + * @param cmd command + */ +static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + struct hwtstamp_config conf; + struct lio *lio = GET_LIO(netdev); + + if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf))) + return -EFAULT; + + if (conf.flags) + return -EINVAL; + + switch (conf.tx_type) { + case HWTSTAMP_TX_ON: + case HWTSTAMP_TX_OFF: + break; + default: + return -ERANGE; + } + + switch (conf.rx_filter) { + case HWTSTAMP_FILTER_NONE: + break; + case HWTSTAMP_FILTER_ALL: + case HWTSTAMP_FILTER_SOME: + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + conf.rx_filter = HWTSTAMP_FILTER_ALL; + break; + default: + return -ERANGE; + } + + if (conf.rx_filter == HWTSTAMP_FILTER_ALL) + ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); + + else + ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED); + + return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0; +} + +/** + * \brief ioctl handler + * @param netdev network device + * @param ifr interface request + * @param cmd command + */ +static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCSHWTSTAMP: + return hwtstamp_ioctl(netdev, ifr, cmd); + default: + return -EOPNOTSUPP; + } +} + +/** + * \brief handle a Tx timestamp response + * @param status response status + * @param buf pointer to skb + */ +static void handle_timestamp(struct octeon_device *oct, + u32 status, + void *buf) +{ + struct octnet_buf_free_info *finfo; + struct octeon_soft_command *sc; + struct oct_timestamp_resp *resp; + struct lio *lio; + struct sk_buff *skb = (struct sk_buff *)buf; + + finfo = (struct octnet_buf_free_info *)skb->cb; + lio = finfo->lio; + sc = finfo->sc; + oct = lio->oct_dev; + resp = (struct oct_timestamp_resp *)sc->virtrptr; + + if (status != OCTEON_REQUEST_DONE) { + dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. 
Status: %llx\n",
+			CVM_CAST64(status));
+		resp->timestamp = 0;
+	}
+
+	octeon_swap_8B_data(&resp->timestamp, 1);
+
+	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
+		struct skb_shared_hwtstamps ts;
+		u64 ns = resp->timestamp;
+
+		netif_info(lio, tx_done, lio->netdev,
+			   "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
+			   skb, (unsigned long long)ns);
+		ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
+		skb_tstamp_tx(skb, &ts);
+	}
+
+	octeon_free_soft_command(oct, sc);
+	recv_buffer_free(skb);
+}
+
+/** \brief Send a data packet that will be timestamped
+ * @param oct octeon device
+ * @param ndata pointer to network data
+ * @param finfo pointer to private network data
+ */
+static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
+					 struct octnic_data_pkt *ndata,
+					 struct octnet_buf_free_info *finfo,
+					 int xmit_more)
+{
+	int retval;
+	struct octeon_soft_command *sc;
+	struct octeon_instr_ih *ih;
+	struct octeon_instr_rdp *rdp;
+	struct lio *lio;
+	int ring_doorbell;
+
+	lio = finfo->lio;
+
+	sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
+					    sizeof(struct oct_timestamp_resp));
+	finfo->sc = sc;
+
+	if (!sc) {
+		dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
+		return IQ_SEND_FAILED;
+	}
+
+	if (ndata->reqtype == REQTYPE_NORESP_NET)
+		ndata->reqtype = REQTYPE_RESP_NET;
+	else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
+		ndata->reqtype = REQTYPE_RESP_NET_SG;
+
+	sc->callback = handle_timestamp;
+	sc->callback_arg = finfo->skb;
+	sc->iq_no = ndata->q_no;
+
+	ih = (struct octeon_instr_ih *)&sc->cmd.ih;
+	rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;
+
+	ring_doorbell = !xmit_more;
+	retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
+				     sc, ih->dlengsz, ndata->reqtype);
+
+	if (retval) {
+		dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
+			retval);
+		octeon_free_soft_command(oct, sc);
+	} else {
+		netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
+	}
+
+	return retval;
+}
+
+static inline int is_ipv4(struct sk_buff *skb)
+{
+	return (skb->protocol == htons(ETH_P_IP)) &&
+	       (ip_hdr(skb)->version == 4);
+}
+
+static inline int is_vlan(struct sk_buff *skb)
+{
+	return skb->protocol == htons(ETH_P_8021Q);
+}
+
+static inline int is_ip_fragmented(struct sk_buff *skb)
+{
+	/* The Don't fragment and Reserved flag fields are ignored.
+	 * IP is fragmented if
+	 * - the More fragments bit is set (indicating this IP is a fragment
+	 *   with more to follow; the current offset could be 0).
+	 * - the offset field is non-zero.
+	 */
+	return htons(ip_hdr(skb)->frag_off) & 0x3fff;
+}
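+
+/* Editorial note: after the byte swap above, the frag_off field holds the
+ * three flag bits on top of a 13-bit fragment offset, so masking with
+ * 0x3fff keeps the More Fragments bit (0x2000) plus the offset:
+ *
+ *	MF=1, offset 0     -> 0x2000 & 0x3fff != 0  (first fragment)
+ *	MF=0, offset 0x10  -> 0x0010 & 0x3fff != 0  (later fragment)
+ *	MF=0, offset 0     -> 0x0000 & 0x3fff == 0  (not fragmented)
+ */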
+static inline int is_ipv6(struct sk_buff *skb)
+{
+	return (skb->protocol == htons(ETH_P_IPV6)) &&
+	       (ipv6_hdr(skb)->version == 6);
+}
+
+static inline int is_with_extn_hdr(struct sk_buff *skb)
+{
+	return (ipv6_hdr(skb)->nexthdr != IPPROTO_TCP) &&
+	       (ipv6_hdr(skb)->nexthdr != IPPROTO_UDP);
+}
+
+static inline int is_tcpudp(struct sk_buff *skb)
+{
+	return (ip_hdr(skb)->protocol == IPPROTO_TCP) ||
+	       (ip_hdr(skb)->protocol == IPPROTO_UDP);
+}
+
+static inline u32 get_ipv4_5tuple_tag(struct sk_buff *skb)
+{
+	u32 tag;
+	struct iphdr *iphdr = ip_hdr(skb);
+
+	tag = crc32(0, &iphdr->protocol, 1);
+	tag = crc32(tag, (u8 *)&iphdr->saddr, 8);
+	tag = crc32(tag, skb_transport_header(skb), 4);
+	return tag;
+}
+
+static inline u32 get_ipv6_5tuple_tag(struct sk_buff *skb)
+{
+	u32 tag;
+	struct ipv6hdr *ipv6hdr = ipv6_hdr(skb);
+
+	tag = crc32(0, &ipv6hdr->nexthdr, 1);
+	tag = crc32(tag, (u8 *)&ipv6hdr->saddr, 32);
+	tag = crc32(tag, skb_transport_header(skb), 4);
+	return tag;
+}
+
+/** \brief Transmit network packets to the Octeon interface
+ * @param skb sk_buff struct to be sent to the Octeon device
+ * @param netdev pointer to network device
+ * @returns whether the packet was transmitted to the device okay or not
+ * (NETDEV_TX_OK or NETDEV_TX_BUSY)
+ */
+static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct lio *lio;
+	struct octnet_buf_free_info *finfo;
+	union octnic_cmd_setup cmdsetup;
+	struct octnic_data_pkt ndata;
+	struct octeon_device *oct;
+	struct oct_iq_stats *stats;
+	int cpu = 0, status = 0;
+	int q_idx = 0, iq_no = 0;
+	int xmit_more;
+	u32 tag = 0;
+
+	lio = GET_LIO(netdev);
+	oct = lio->oct_dev;
+
+	if (netif_is_multiqueue(netdev)) {
+		cpu = skb->queue_mapping;
+		q_idx = (cpu & (lio->linfo.num_txpciq - 1));
+		iq_no = lio->linfo.txpciq[q_idx];
+	} else {
+		iq_no = lio->txq;
+	}
+
+	stats = &oct->instr_queue[iq_no]->stats;
+
+	/* Check for all conditions in which the current packet cannot be
+	 * transmitted.
+	 */
+	if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
+	    (!lio->linfo.link.s.status) ||
+	    (skb->len <= 0)) {
+		netif_info(lio, tx_err, lio->netdev,
+			   "Transmit failed link_status : %d\n",
+			   lio->linfo.link.s.status);
+		goto lio_xmit_failed;
+	}
+
+	/* Use space in skb->cb to store info used to unmap and
+	 * free the buffers.
+	 */
+	finfo = (struct octnet_buf_free_info *)skb->cb;
+	finfo->lio = lio;
+	finfo->skb = skb;
+	finfo->sc = NULL;
+
+	/* Prepare the attributes for the data to be passed to OSI.
*/ + memset(&ndata, 0, sizeof(struct octnic_data_pkt)); + + ndata.buf = (void *)finfo; + + ndata.q_no = iq_no; + + if (netif_is_multiqueue(netdev)) { + if (octnet_iq_is_full(oct, ndata.q_no)) { + /* defer sending if queue is full */ + netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n", + ndata.q_no); + stats->tx_iq_busy++; + return NETDEV_TX_BUSY; + } + } else { + if (octnet_iq_is_full(oct, lio->txq)) { + /* defer sending if queue is full */ + stats->tx_iq_busy++; + netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n", + ndata.q_no); + return NETDEV_TX_BUSY; + } + } + /* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n", + * lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no ); + */ + + ndata.datasize = skb->len; + + cmdsetup.u64 = 0; + cmdsetup.s.ifidx = lio->linfo.ifidx; + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + if (is_ipv4(skb) && !is_ip_fragmented(skb) && is_tcpudp(skb)) { + tag = get_ipv4_5tuple_tag(skb); + + cmdsetup.s.cksum_offset = sizeof(struct ethhdr) + 1; + + if (ip_hdr(skb)->ihl > 5) + cmdsetup.s.ipv4opts_ipv6exthdr = + OCT_PKT_PARAM_IPV4OPTS; + + } else if (is_ipv6(skb)) { + tag = get_ipv6_5tuple_tag(skb); + + cmdsetup.s.cksum_offset = sizeof(struct ethhdr) + 1; + + if (is_with_extn_hdr(skb)) + cmdsetup.s.ipv4opts_ipv6exthdr = + OCT_PKT_PARAM_IPV6EXTHDR; + + } else if (is_vlan(skb)) { + if (vlan_eth_hdr(skb)->h_vlan_encapsulated_proto + == htons(ETH_P_IP) && + !is_ip_fragmented(skb) && is_tcpudp(skb)) { + tag = get_ipv4_5tuple_tag(skb); + + cmdsetup.s.cksum_offset = + sizeof(struct vlan_ethhdr) + 1; + + if (ip_hdr(skb)->ihl > 5) + cmdsetup.s.ipv4opts_ipv6exthdr = + OCT_PKT_PARAM_IPV4OPTS; + + } else if (vlan_eth_hdr(skb)->h_vlan_encapsulated_proto + == htons(ETH_P_IPV6)) { + tag = get_ipv6_5tuple_tag(skb); + + cmdsetup.s.cksum_offset = + sizeof(struct vlan_ethhdr) + 1; + + if (is_with_extn_hdr(skb)) + cmdsetup.s.ipv4opts_ipv6exthdr = + OCT_PKT_PARAM_IPV6EXTHDR; + } + } + } + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + cmdsetup.s.timestamp = 1; + } + + if (skb_shinfo(skb)->nr_frags == 0) { + cmdsetup.s.u.datasize = skb->len; + octnet_prepare_pci_cmd(&ndata.cmd, &cmdsetup, tag); + /* Offload checksum calculation for TCP/UDP packets */ + ndata.cmd.dptr = dma_map_single(&oct->pci_dev->dev, + skb->data, + skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(&oct->pci_dev->dev, ndata.cmd.dptr)) { + dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n", + __func__); + return NETDEV_TX_BUSY; + } + + finfo->dptr = ndata.cmd.dptr; + + ndata.reqtype = REQTYPE_NORESP_NET; + + } else { + int i, frags; + struct skb_frag_struct *frag; + struct octnic_gather *g; + + spin_lock(&lio->lock); + g = (struct octnic_gather *)list_delete_head(&lio->glist); + spin_unlock(&lio->lock); + + if (!g) { + netif_info(lio, tx_err, lio->netdev, + "Transmit scatter gather: glist null!\n"); + goto lio_xmit_failed; + } + + cmdsetup.s.gather = 1; + cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1); + octnet_prepare_pci_cmd(&ndata.cmd, &cmdsetup, tag); + + memset(g->sg, 0, g->sg_size); + + g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev, + skb->data, + (skb->len - skb->data_len), + DMA_TO_DEVICE); + if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) { + dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n", + __func__); + return NETDEV_TX_BUSY; + } + add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0); + + frags = skb_shinfo(skb)->nr_frags; + i = 1; + while (frags--) { + 
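+		/* Editorial note: each octeon_sg_entry carries four pointers,
+		 * so entry (i >> 2), slot (i & 3) is the i-th pointer overall;
+		 * slot 0 of entry 0 already holds the skb's linear region
+		 * mapped just above, which is why i starts at 1 here.
+		 */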
frag = &skb_shinfo(skb)->frags[i - 1]; + + g->sg[(i >> 2)].ptr[(i & 3)] = + dma_map_page(&oct->pci_dev->dev, + frag->page.p, + frag->page_offset, + frag->size, + DMA_TO_DEVICE); + + add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3)); + i++; + } + + ndata.cmd.dptr = dma_map_single(&oct->pci_dev->dev, + g->sg, g->sg_size, + DMA_TO_DEVICE); + if (dma_mapping_error(&oct->pci_dev->dev, ndata.cmd.dptr)) { + dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n", + __func__); + dma_unmap_single(&oct->pci_dev->dev, g->sg[0].ptr[0], + skb->len - skb->data_len, + DMA_TO_DEVICE); + return NETDEV_TX_BUSY; + } + + finfo->dptr = ndata.cmd.dptr; + finfo->g = g; + + ndata.reqtype = REQTYPE_NORESP_NET_SG; + } + + if (skb_shinfo(skb)->gso_size) { + struct octeon_instr_irh *irh = + (struct octeon_instr_irh *)&ndata.cmd.irh; + union tx_info *tx_info = (union tx_info *)&ndata.cmd.ossp[0]; + + irh->len = 1; /* to indicate that ossp[0] contains tx_info */ + tx_info->s.gso_size = skb_shinfo(skb)->gso_size; + tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs; + } + + xmit_more = skb->xmit_more; + + if (unlikely(cmdsetup.s.timestamp)) + status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more); + else + status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more); + if (status == IQ_SEND_FAILED) + goto lio_xmit_failed; + + netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n"); + + if (status == IQ_SEND_STOP) + stop_q(lio->netdev, q_idx); + + netdev->trans_start = jiffies; + + stats->tx_done++; + stats->tx_tot_bytes += skb->len; + + return NETDEV_TX_OK; + +lio_xmit_failed: + stats->tx_dropped++; + netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n", + iq_no, stats->tx_dropped); + dma_unmap_single(&oct->pci_dev->dev, ndata.cmd.dptr, + ndata.datasize, DMA_TO_DEVICE); + recv_buffer_free(skb); + return NETDEV_TX_OK; +} + +/** \brief Network device Tx timeout + * @param netdev pointer to network device + */ +static void liquidio_tx_timeout(struct net_device *netdev) +{ + struct lio *lio; + + lio = GET_LIO(netdev); + + netif_info(lio, tx_err, lio->netdev, + "Transmit timeout tx_dropped:%ld, waking up queues now!!\n", + netdev->stats.tx_dropped); + netdev->trans_start = jiffies; + txqs_wake(netdev); +} + +int liquidio_set_feature(struct net_device *netdev, int cmd) +{ + struct lio *lio = GET_LIO(netdev); + struct octeon_device *oct = lio->oct_dev; + struct octnic_ctrl_pkt nctrl; + struct octnic_ctrl_params nparams; + int ret = 0; + + memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt)); + + nctrl.ncmd.u64 = 0; + nctrl.ncmd.s.cmd = cmd; + nctrl.ncmd.s.param1 = lio->linfo.ifidx; + nctrl.ncmd.s.param2 = OCTNIC_LROIPV4 | OCTNIC_LROIPV6; + nctrl.wait_time = 100; + nctrl.netpndev = (u64)netdev; + nctrl.cb_fn = liquidio_link_ctrl_cmd_completion; + + nparams.resp_order = OCTEON_RESP_NORESPONSE; + + ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams); + if (ret < 0) { + dev_err(&oct->pci_dev->dev, "Feature change failed in core (ret: 0x%x)\n", + ret); + } + return ret; +} + +/** \brief Net device fix features + * @param netdev pointer to network device + * @param request features requested + * @returns updated features list + */ +static netdev_features_t liquidio_fix_features(struct net_device *netdev, + netdev_features_t request) +{ + struct lio *lio = netdev_priv(netdev); + + if ((request & NETIF_F_RXCSUM) && + !(lio->dev_capability & NETIF_F_RXCSUM)) + request &= ~NETIF_F_RXCSUM; + + if ((request & NETIF_F_HW_CSUM) && + !(lio->dev_capability & NETIF_F_HW_CSUM)) + request &= 
~NETIF_F_HW_CSUM;
+
+	if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
+		request &= ~NETIF_F_TSO;
+
+	if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
+		request &= ~NETIF_F_TSO6;
+
+	if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
+		request &= ~NETIF_F_LRO;
+
+	/* Disable LRO if RXCSUM is off */
+	if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
+	    (lio->dev_capability & NETIF_F_LRO))
+		request &= ~NETIF_F_LRO;
+
+	return request;
+}
+
+/** \brief Net device set features
+ * @param netdev pointer to network device
+ * @param features features to enable/disable
+ */
+static int liquidio_set_features(struct net_device *netdev,
+				 netdev_features_t features)
+{
+	struct lio *lio = netdev_priv(netdev);
+
+	if (!((netdev->features ^ features) & NETIF_F_LRO))
+		return 0;
+
+	if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO))
+		liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE);
+	else if (!(features & NETIF_F_LRO) &&
+		 (lio->dev_capability & NETIF_F_LRO))
+		liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE);
+
+	return 0;
+}
+
+static struct net_device_ops lionetdevops = {
+	.ndo_open = liquidio_open,
+	.ndo_stop = liquidio_stop,
+	.ndo_start_xmit = liquidio_xmit,
+	.ndo_get_stats = liquidio_get_stats,
+	.ndo_set_mac_address = liquidio_set_mac,
+	.ndo_set_rx_mode = liquidio_set_mcast_list,
+	.ndo_tx_timeout = liquidio_tx_timeout,
+	.ndo_change_mtu = liquidio_change_mtu,
+	.ndo_do_ioctl = liquidio_ioctl,
+	.ndo_fix_features = liquidio_fix_features,
+	.ndo_set_features = liquidio_set_features,
+};
+
+/** \brief Entry point for the liquidio module
+ */
+static int __init liquidio_init(void)
+{
+	int i;
+	struct handshake *hs;
+
+	init_completion(&first_stage);
+
+	octeon_init_device_list(conf_type);
+
+	if (liquidio_init_pci())
+		return -EINVAL;
+
+	wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000));
+
+	for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
+		hs = &handshake[i];
+		if (hs->pci_dev) {
+			wait_for_completion(&hs->init);
+			if (!hs->init_ok) {
+				/* init handshake failed */
+				dev_err(&hs->pci_dev->dev,
+					"Failed to init device\n");
+				liquidio_deinit_pci();
+				return -EIO;
+			}
+		}
+	}
+
+	for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
+		hs = &handshake[i];
+		if (hs->pci_dev) {
+			wait_for_completion_timeout(&hs->started,
+						    msecs_to_jiffies(30000));
+			if (!hs->started_ok) {
+				/* starter handshake failed */
+				dev_err(&hs->pci_dev->dev,
+					"Firmware failed to start\n");
+				liquidio_deinit_pci();
+				return -EIO;
+			}
+		}
+	}
+
+	return 0;
+}
+
+int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
+{
+	struct octeon_device *oct = (struct octeon_device *)buf;
+	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
+	int ifidx = 0;
+	union oct_link_status *ls;
+	int i;
+
+	if ((recv_pkt->buffer_size[0] != sizeof(*ls)) ||
+	    (recv_pkt->rh.r_nic_info.ifidx >= oct->ifcount)) {
+		dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
+			recv_pkt->buffer_size[0],
+			recv_pkt->rh.r_nic_info.ifidx);
+		goto nic_info_err;
+	}
+
+	ifidx = recv_pkt->rh.r_nic_info.ifidx;
+	ls = (union oct_link_status *)get_rbd(recv_pkt->buffer_ptr[0]);
+
+	octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
+
+	update_link_status(oct->props[ifidx].netdev, ls);
+
+nic_info_err:
+	for (i = 0; i < recv_pkt->buffer_count; i++)
+		recv_buffer_free(recv_pkt->buffer_ptr[i]);
+	octeon_free_recv_info(recv_info);
+	return 0;
+}
+
+/**
+ * \brief Setup network interfaces
+ * @param octeon_dev octeon
device + * + * Called during init time for each device. It assumes the NIC + * is already up and running. The link information for each + * interface is passed in link_info. + */ +static int setup_nic_devices(struct octeon_device *octeon_dev) +{ + struct lio *lio = NULL; + struct net_device *netdev; + u8 mac[6], i, j; + struct octeon_soft_command *sc; + struct liquidio_if_cfg_context *ctx; + struct liquidio_if_cfg_resp *resp; + struct octdev_props *props; + int retval, num_iqueues, num_oqueues, q_no; + u64 q_mask; + int num_cpus = num_online_cpus(); + union oct_nic_if_cfg if_cfg; + unsigned int base_queue; + unsigned int gmx_port_id; + u32 resp_size, ctx_size; + + /* This is to handle link status changes */ + octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC, + OPCODE_NIC_INFO, + lio_nic_info, octeon_dev); + + /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions. + * They are handled directly. + */ + octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET, + free_netbuf); + + octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG, + free_netsgbuf); + + octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG, + free_netsgbuf_with_resp); + + for (i = 0; i < octeon_dev->ifcount; i++) { + resp_size = sizeof(struct liquidio_if_cfg_resp); + ctx_size = sizeof(struct liquidio_if_cfg_context); + sc = (struct octeon_soft_command *) + octeon_alloc_soft_command(octeon_dev, 0, + resp_size, ctx_size); + resp = (struct liquidio_if_cfg_resp *)sc->virtrptr; + ctx = (struct liquidio_if_cfg_context *)sc->ctxptr; + + num_iqueues = + CFG_GET_NUM_TXQS_NIC_IF(octeon_get_conf(octeon_dev), i); + num_oqueues = + CFG_GET_NUM_RXQS_NIC_IF(octeon_get_conf(octeon_dev), i); + base_queue = + CFG_GET_BASE_QUE_NIC_IF(octeon_get_conf(octeon_dev), i); + gmx_port_id = + CFG_GET_GMXID_NIC_IF(octeon_get_conf(octeon_dev), i); + if (num_iqueues > num_cpus) + num_iqueues = num_cpus; + if (num_oqueues > num_cpus) + num_oqueues = num_cpus; + dev_dbg(&octeon_dev->pci_dev->dev, + "requesting config for interface %d, iqs %d, oqs %d\n", + i, num_iqueues, num_oqueues); + ACCESS_ONCE(ctx->cond) = 0; + ctx->octeon_id = lio_get_device_id(octeon_dev); + init_waitqueue_head(&ctx->wc); + + if_cfg.u64 = 0; + if_cfg.s.num_iqueues = num_iqueues; + if_cfg.s.num_oqueues = num_oqueues; + if_cfg.s.base_queue = base_queue; + if_cfg.s.gmx_port_id = gmx_port_id; + octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC, + OPCODE_NIC_IF_CFG, i, + if_cfg.u64, 0); + + sc->callback = if_cfg_callback; + sc->callback_arg = sc; + sc->wait_time = 1000; + + retval = octeon_send_soft_command(octeon_dev, sc); + if (retval) { + dev_err(&octeon_dev->pci_dev->dev, + "iq/oq config failed status: %x\n", + retval); + /* Soft instr is freed by driver in case of failure. */ + goto setup_nic_dev_fail; + } + + /* Sleep on a wait queue till the cond flag indicates that the + * response arrived or timed-out. 
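+	 * if_cfg_callback(), installed as sc->callback above, is expected
+	 * to set ctx->cond and wake up ctx->wc once the firmware answers
+	 * the OPCODE_NIC_IF_CFG request.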
+ */ + sleep_cond(&ctx->wc, &ctx->cond); + retval = resp->status; + if (retval) { + dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n"); + goto setup_nic_dev_fail; + } + + octeon_swap_8B_data((u64 *)(&resp->cfg_info), + (sizeof(struct liquidio_if_cfg_info)) >> 3); + + num_iqueues = hweight64(resp->cfg_info.iqmask); + num_oqueues = hweight64(resp->cfg_info.oqmask); + + if (!(num_iqueues) || !(num_oqueues)) { + dev_err(&octeon_dev->pci_dev->dev, + "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n", + resp->cfg_info.iqmask, + resp->cfg_info.oqmask); + goto setup_nic_dev_fail; + } + dev_dbg(&octeon_dev->pci_dev->dev, + "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n", + i, resp->cfg_info.iqmask, resp->cfg_info.oqmask, + num_iqueues, num_oqueues); + netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues); + + if (!netdev) { + dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n"); + goto setup_nic_dev_fail; + } + + props = &octeon_dev->props[i]; + props->netdev = netdev; + + if (num_iqueues > 1) + lionetdevops.ndo_select_queue = select_q; + + /* Associate the routines that will handle different + * netdev tasks. + */ + netdev->netdev_ops = &lionetdevops; + + lio = GET_LIO(netdev); + + memset(lio, 0, sizeof(struct lio)); + + lio->linfo.ifidx = resp->cfg_info.ifidx; + lio->ifidx = resp->cfg_info.ifidx; + + lio->linfo.num_rxpciq = num_oqueues; + lio->linfo.num_txpciq = num_iqueues; + q_mask = resp->cfg_info.oqmask; + /* q_mask is 0-based and already verified mask is nonzero */ + for (j = 0; j < num_oqueues; j++) { + q_no = __ffs64(q_mask); + q_mask &= (~(1UL << q_no)); + lio->linfo.rxpciq[j] = q_no; + } + q_mask = resp->cfg_info.iqmask; + for (j = 0; j < num_iqueues; j++) { + q_no = __ffs64(q_mask); + q_mask &= (~(1UL << q_no)); + lio->linfo.txpciq[j] = q_no; + } + lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr; + lio->linfo.gmxport = resp->cfg_info.linfo.gmxport; + lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64; + + lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + + lio->dev_capability = NETIF_F_HIGHDMA + | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM + | NETIF_F_SG | NETIF_F_RXCSUM + | NETIF_F_TSO | NETIF_F_TSO6 + | NETIF_F_LRO; + netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE); + + netdev->features = lio->dev_capability; + netdev->vlan_features = lio->dev_capability; + + netdev->hw_features = lio->dev_capability; + + /* Point to the properties for octeon device to which this + * interface belongs. 
+ */ + lio->oct_dev = octeon_dev; + lio->octprops = props; + lio->netdev = netdev; + spin_lock_init(&lio->lock); + + dev_dbg(&octeon_dev->pci_dev->dev, + "if%d gmx: %d hw_addr: 0x%llx\n", i, + lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr)); + + /* 64-bit swap required on LE machines */ + octeon_swap_8B_data(&lio->linfo.hw_addr, 1); + for (j = 0; j < 6; j++) + mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j)); + + /* Copy MAC Address to OS network device structure */ + + ether_addr_copy(netdev->dev_addr, mac); + + if (setup_io_queues(octeon_dev, netdev)) { + dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n"); + goto setup_nic_dev_fail; + } + + ifstate_set(lio, LIO_IFSTATE_DROQ_OPS); + + /* By default all interfaces on a single Octeon uses the same + * tx and rx queues + */ + lio->txq = lio->linfo.txpciq[0]; + lio->rxq = lio->linfo.rxpciq[0]; + + lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq); + lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq); + + if (setup_glist(lio)) { + dev_err(&octeon_dev->pci_dev->dev, + "Gather list allocation failed\n"); + goto setup_nic_dev_fail; + } + + /* Register ethtool support */ + liquidio_set_ethtool_ops(netdev); + + liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE); + + if ((debug != -1) && (debug & NETIF_MSG_HW)) + liquidio_set_feature(netdev, OCTNET_CMD_VERBOSE_ENABLE); + + /* Register the network device with the OS */ + if (register_netdev(netdev)) { + dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n"); + goto setup_nic_dev_fail; + } + + dev_dbg(&octeon_dev->pci_dev->dev, + "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n", + i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); + netif_carrier_off(netdev); + + if (lio->linfo.link.s.status) { + netif_carrier_on(netdev); + start_txq(netdev); + } else { + netif_carrier_off(netdev); + } + + ifstate_set(lio, LIO_IFSTATE_REGISTERED); + + dev_dbg(&octeon_dev->pci_dev->dev, + "NIC ifidx:%d Setup successful\n", i); + + octeon_free_soft_command(octeon_dev, sc); + } + + return 0; + +setup_nic_dev_fail: + + octeon_free_soft_command(octeon_dev, sc); + + while (i--) { + dev_err(&octeon_dev->pci_dev->dev, + "NIC ifidx:%d Setup failed\n", i); + liquidio_destroy_nic_device(octeon_dev, i); + } + return -ENODEV; +} + +/** + * \brief initialize the NIC + * @param oct octeon device + * + * This initialization routine is called once the Octeon device application is + * up and running + */ +static int liquidio_init_nic_module(struct octeon_device *oct) +{ + struct oct_intrmod_cfg *intrmod_cfg; + int retval = 0; + int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct)); + + dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n"); + + /* only default iq and oq were initialized + * initialize the rest as well + */ + /* run port_config command for each port */ + oct->ifcount = num_nic_ports; + + memset(oct->props, 0, + sizeof(struct octdev_props) * num_nic_ports); + + retval = setup_nic_devices(oct); + if (retval) { + dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n"); + goto octnet_init_failure; + } + + liquidio_ptp_init(oct); + + /* Initialize interrupt moderation params */ + intrmod_cfg = &((struct octeon_device *)oct)->intrmod; + intrmod_cfg->intrmod_enable = 1; + intrmod_cfg->intrmod_check_intrvl = LIO_INTRMOD_CHECK_INTERVAL; + intrmod_cfg->intrmod_maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR; + intrmod_cfg->intrmod_minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR; + intrmod_cfg->intrmod_maxcnt_trigger = LIO_INTRMOD_MAXCNT_TRIGGER; + 
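+	/* The rate thresholds above tell the firmware when to adapt its
+	 * coalescing; the trigger values below bound how many packets, or
+	 * how much time, may accumulate before an interrupt is raised.
+	 * Presumably the firmware moves within the min/max trigger window
+	 * as the measured forward rate (fwd_rate) crosses the thresholds.
+	 */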
intrmod_cfg->intrmod_maxtmr_trigger = LIO_INTRMOD_MAXTMR_TRIGGER;
+	intrmod_cfg->intrmod_mintmr_trigger = LIO_INTRMOD_MINTMR_TRIGGER;
+	intrmod_cfg->intrmod_mincnt_trigger = LIO_INTRMOD_MINCNT_TRIGGER;
+
+	dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
+
+	return retval;
+
+octnet_init_failure:
+
+	oct->ifcount = 0;
+
+	return retval;
+}
+
+/**
+ * \brief starter callback that invokes the remaining initialization work after
+ * the NIC is up and running.
+ * @param work the work_struct embedded in the struct cavium_wk that carries
+ *             the octeon device pointer
+ */
+static void nic_starter(struct work_struct *work)
+{
+	struct octeon_device *oct;
+	struct cavium_wk *wk = (struct cavium_wk *)work;
+
+	oct = (struct octeon_device *)wk->ctxptr;
+
+	if (atomic_read(&oct->status) == OCT_DEV_RUNNING)
+		return;
+
+	/* If the status of the device is CORE_OK, the core
+	 * application has reported its application type. Call
+	 * any registered handlers now and move to the RUNNING
+	 * state.
+	 */
+	if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) {
+		schedule_delayed_work(&oct->nic_poll_work.work,
+				      LIQUIDIO_STARTER_POLL_INTERVAL_MS);
+		return;
+	}
+
+	atomic_set(&oct->status, OCT_DEV_RUNNING);
+
+	if (oct->app_mode && oct->app_mode == CVM_DRV_NIC_APP) {
+		dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n");
+
+		if (liquidio_init_nic_module(oct))
+			dev_err(&oct->pci_dev->dev, "NIC initialization failed\n");
+		else
+			handshake[oct->octeon_id].started_ok = 1;
+	} else {
+		dev_err(&oct->pci_dev->dev,
+			"Unexpected application running on NIC (%d). Check firmware.\n",
+			oct->app_mode);
+	}
+
+	complete(&handshake[oct->octeon_id].started);
+}
+
+/**
+ * \brief Device initialization for each Octeon device that is probed
+ * @param octeon_dev octeon device
+ */
+static int octeon_device_init(struct octeon_device *octeon_dev)
+{
+	int j, ret;
+	struct octeon_device_priv *oct_priv =
+		(struct octeon_device_priv *)octeon_dev->priv;
+
+	atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);
+
+	/* Enable access to the octeon device and make its DMA capability
+	 * known to the OS.
+	 */
+	if (octeon_pci_os_setup(octeon_dev))
+		return 1;
+
+	/* Identify the Octeon type and map the BAR address space. */
+	if (octeon_chip_specific_setup(octeon_dev)) {
+		dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n");
+		return 1;
+	}
+
+	atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE);
+
+	octeon_dev->app_mode = CVM_DRV_INVALID_APP;
+
+	/* Do a soft reset of the Octeon device. */
+	if (octeon_dev->fn_list.soft_reset(octeon_dev))
+		return 1;
+
+	/* Initialize the dispatch mechanism used to push packets arriving on
+	 * Octeon Output queues.
+	 */
+	if (octeon_init_dispatch_list(octeon_dev))
+		return 1;
+
+	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
+				    OPCODE_NIC_CORE_DRV_ACTIVE,
+				    octeon_core_drv_init,
+				    octeon_dev);
+
+	INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter);
+	octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev;
+	schedule_delayed_work(&octeon_dev->nic_poll_work.work,
+			      LIQUIDIO_STARTER_POLL_INTERVAL_MS);
+
+	atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE);
+
+	octeon_set_io_queues_off(octeon_dev);
+
+	/* Setup the data structures that manage this Octeon's Input queues.
*/ + if (octeon_setup_instr_queues(octeon_dev)) { + dev_err(&octeon_dev->pci_dev->dev, + "instruction queue initialization failed\n"); + /* On error, release any previously allocated queues */ + for (j = 0; j < octeon_dev->num_iqs; j++) + octeon_delete_instr_queue(octeon_dev, j); + return 1; + } + atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE); + + /* Initialize soft command buffer pool + */ + if (octeon_setup_sc_buffer_pool(octeon_dev)) { + dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n"); + return 1; + } + atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE); + + /* Initialize lists to manage the requests of different types that + * arrive from user & kernel applications for this octeon device. + */ + if (octeon_setup_response_list(octeon_dev)) { + dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n"); + return 1; + } + atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE); + + if (octeon_setup_output_queues(octeon_dev)) { + dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n"); + /* Release any previously allocated queues */ + for (j = 0; j < octeon_dev->num_oqs; j++) + octeon_delete_droq(octeon_dev, j); + } + + atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE); + + /* The input and output queue registers were setup earlier (the queues + * were not enabled). Any additional registers that need to be + * programmed should be done now. + */ + ret = octeon_dev->fn_list.setup_device_regs(octeon_dev); + if (ret) { + dev_err(&octeon_dev->pci_dev->dev, + "Failed to configure device registers\n"); + return ret; + } + + /* Initialize the tasklet that handles output queue packet processing.*/ + dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n"); + tasklet_init(&oct_priv->droq_tasklet, octeon_droq_bh, + (unsigned long)octeon_dev); + + /* Setup the interrupt handler and record the INT SUM register address + */ + octeon_setup_interrupt(octeon_dev); + + /* Enable Octeon device interrupts */ + octeon_dev->fn_list.enable_interrupt(octeon_dev->chip); + + /* Enable the input and output queues for this Octeon device */ + octeon_dev->fn_list.enable_io_queues(octeon_dev); + + atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE); + + dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n"); + + if (ddr_timeout == 0) { + dev_info(&octeon_dev->pci_dev->dev, + "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n"); + } + + schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS); + + /* Wait for the octeon to initialize DDR after the soft-reset. */ + ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout); + if (ret) { + dev_err(&octeon_dev->pci_dev->dev, + "DDR not initialized. 
Please confirm that board is configured to boot from Flash, ret: %d\n", + ret); + return 1; + } + + if (octeon_wait_for_bootloader(octeon_dev, 1000) != 0) { + dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n"); + return 1; + } + + dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n"); + ret = octeon_init_consoles(octeon_dev); + if (ret) { + dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n"); + return 1; + } + ret = octeon_add_console(octeon_dev, 0); + if (ret) { + dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n"); + return 1; + } + + atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE); + + dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n"); + ret = load_firmware(octeon_dev); + if (ret) { + dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n"); + return 1; + } + + handshake[octeon_dev->octeon_id].init_ok = 1; + complete(&handshake[octeon_dev->octeon_id].init); + + atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK); + + /* Send Credit for Octeon Output queues. Credits are always sent after + * the output queue is enabled. + */ + for (j = 0; j < octeon_dev->num_oqs; j++) + writel(octeon_dev->droq[j]->max_count, + octeon_dev->droq[j]->pkts_credit_reg); + + /* Packets can start arriving on the output queues from this point. */ + + return 0; +} + +/** + * \brief Exits the module + */ +static void __exit liquidio_exit(void) +{ + liquidio_deinit_pci(); + + pr_info("LiquidIO network module is now unloaded\n"); +} + +module_init(liquidio_init); +module_exit(liquidio_exit); diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h new file mode 100644 index 000000000000..0ac347ccc8ba --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h @@ -0,0 +1,673 @@ +/********************************************************************** +* Author: Cavium, Inc. +* +* Contact: support@cavium.com +* Please include "LiquidIO" in the subject. +* +* Copyright (c) 2003-2015 Cavium, Inc. +* +* This file is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License, Version 2, as +* published by the Free Software Foundation. +* +* This file is distributed in the hope that it will be useful, but +* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty +* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or +* NONINFRINGEMENT. See the GNU General Public License for more +* details. +* +* This file may also be available under a different license from Cavium. +* Contact Cavium, Inc. for more information +**********************************************************************/ + +/*! \file liquidio_common.h + * \brief Common: Structures and macros used in PCI-NIC package by core and + * host driver. + */ + +#ifndef __LIQUIDIO_COMMON_H__ +#define __LIQUIDIO_COMMON_H__ + +#include "octeon_config.h" + +#define LIQUIDIO_VERSION "1.1.9" +#define LIQUIDIO_MAJOR_VERSION 1 +#define LIQUIDIO_MINOR_VERSION 1 +#define LIQUIDIO_MICRO_VERSION 9 + +#define CONTROL_IQ 0 +/** Tag types used by Octeon cores in its work. */ +enum octeon_tag_type { + ORDERED_TAG = 0, + ATOMIC_TAG = 1, + NULL_TAG = 2, + NULL_NULL_TAG = 3 +}; + +/* pre-defined host->NIC tag values */ +#define LIO_CONTROL (0x11111110) +#define LIO_DATA(i) (0x11111111 + (i)) + +/* Opcodes used by host driver/apps to perform operations on the core. + * These are used to identify the major subsystem that the operation + * is for. 
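+ * For example, the host registers its link-status handler for
+ * (OPCODE_NIC, OPCODE_NIC_INFO), while fast-path packet data arrives
+ * as (OPCODE_NIC, OPCODE_NIC_NW_DATA); see OPCODE_SLOW_PATH() below.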
+ */
+#define OPCODE_CORE 0 /* used for generic core operations */
+#define OPCODE_NIC 1 /* used for NIC operations */
+#define OPCODE_LAST OPCODE_NIC
+
+/* Subcodes are used by host driver/apps to identify the sub-operation
+ * for the core. They only need to be unique for a given subsystem.
+ */
+#define OPCODE_SUBCODE(op, sub) ((((op) & 0x0f) << 8) | ((sub) & 0x7f))
+
+/** OPCODE_CORE subcodes. For future use. */
+
+/** OPCODE_NIC subcodes */
+
+/* This subcode is sent by core PCI driver to indicate cores are ready. */
+#define OPCODE_NIC_CORE_DRV_ACTIVE 0x01
+#define OPCODE_NIC_NW_DATA 0x02 /* network packet data */
+#define OPCODE_NIC_CMD 0x03
+#define OPCODE_NIC_INFO 0x04
+#define OPCODE_NIC_PORT_STATS 0x05
+#define OPCODE_NIC_MDIO45 0x06
+#define OPCODE_NIC_TIMESTAMP 0x07
+#define OPCODE_NIC_INTRMOD_CFG 0x08
+#define OPCODE_NIC_IF_CFG 0x09
+
+#define CORE_DRV_TEST_SCATTER_OP 0xFFF5
+
+#define OPCODE_SLOW_PATH(rh) \
+	(OPCODE_SUBCODE(rh->r.opcode, rh->r.subcode) != \
+		OPCODE_SUBCODE(OPCODE_NIC, OPCODE_NIC_NW_DATA))
+
+/* Application codes advertised by the core driver initialization packet. */
+#define CVM_DRV_APP_START 0x0
+#define CVM_DRV_NO_APP 0
+#define CVM_DRV_APP_COUNT 0x2
+#define CVM_DRV_BASE_APP (CVM_DRV_APP_START + 0x0)
+#define CVM_DRV_NIC_APP (CVM_DRV_APP_START + 0x1)
+#define CVM_DRV_INVALID_APP (CVM_DRV_APP_START + 0x2)
+#define CVM_DRV_APP_END (CVM_DRV_INVALID_APP - 1)
+
+/* Macro to increment index.
+ * Index is incremented by count; if the sum exceeds
+ * max, index is wrapped around to the start.
+ */
+#define INCR_INDEX(index, count, max) \
+do { \
+	if (((index) + (count)) >= (max)) \
+		index = ((index) + (count)) - (max); \
+	else \
+		index += (count); \
+} while (0)
+
+#define INCR_INDEX_BY1(index, max) \
+do { \
+	if ((++(index)) == (max)) \
+		index = 0; \
+} while (0)
+
+#define DECR_INDEX(index, count, max) \
+do { \
+	if ((count) > (index)) \
+		index = (max) - ((count) - (index)); \
+	else \
+		index -= (count); \
+} while (0)
+
+#define OCT_BOARD_NAME 32
+#define OCT_SERIAL_LEN 64
+
+/* Structure used by core driver to send indication that the Octeon
+ * application is ready.
+ */
+struct octeon_core_setup {
+	u64 corefreq;
+
+	char boardname[OCT_BOARD_NAME];
+
+	char board_serial_number[OCT_SERIAL_LEN];
+
+	u64 board_rev_major;
+
+	u64 board_rev_minor;
+
+};
+
+/*--------------------------- SCATTER GATHER ENTRY -----------------------*/
+
+/* The Scatter-Gather List Entry. The scatter or gather component used with
+ * an Octeon input instruction has this format.
+ */
+struct octeon_sg_entry {
+	/** The first 64 bits give the size of data in each dptr. */
+	union {
+		u16 size[4];
+		u64 size64;
+	} u;
+
+	/** The 4 dptr pointers for this entry. */
+	u64 ptr[4];
+
+};
+
+#define OCT_SG_ENTRY_SIZE (sizeof(struct octeon_sg_entry))
+
+/** \brief Add size to gather list
+ * @param sg_entry scatter/gather entry
+ * @param size size to add
+ * @param pos position to add it.
+ */ +static inline void add_sg_size(struct octeon_sg_entry *sg_entry, + u16 size, + u32 pos) +{ +#ifdef __BIG_ENDIAN_BITFIELD + sg_entry->u.size[pos] = size; +#else + sg_entry->u.size[3 - pos] = size; +#endif +} + +/*------------------------- End Scatter/Gather ---------------------------*/ + +#define OCTNET_FRM_PTP_HEADER_SIZE 8 +#define OCTNET_FRM_HEADER_SIZE 30 /* PTP timestamp + VLAN + Ethernet */ + +#define OCTNET_MIN_FRM_SIZE (64 + OCTNET_FRM_PTP_HEADER_SIZE) +#define OCTNET_MAX_FRM_SIZE (16000 + OCTNET_FRM_HEADER_SIZE) + +#define OCTNET_DEFAULT_FRM_SIZE (1500 + OCTNET_FRM_HEADER_SIZE) + +/** NIC Commands are sent using this Octeon Input Queue */ +#define OCTNET_CMD_Q 0 + +/* NIC Command types */ +#define OCTNET_CMD_CHANGE_MTU 0x1 +#define OCTNET_CMD_CHANGE_MACADDR 0x2 +#define OCTNET_CMD_CHANGE_DEVFLAGS 0x3 +#define OCTNET_CMD_RX_CTL 0x4 + +#define OCTNET_CMD_SET_MULTI_LIST 0x5 +#define OCTNET_CMD_CLEAR_STATS 0x6 + +/* command for setting the speed, duplex & autoneg */ +#define OCTNET_CMD_SET_SETTINGS 0x7 +#define OCTNET_CMD_SET_FLOW_CTL 0x8 + +#define OCTNET_CMD_MDIO_READ_WRITE 0x9 +#define OCTNET_CMD_GPIO_ACCESS 0xA +#define OCTNET_CMD_LRO_ENABLE 0xB +#define OCTNET_CMD_LRO_DISABLE 0xC +#define OCTNET_CMD_SET_RSS 0xD +#define OCTNET_CMD_WRITE_SA 0xE +#define OCTNET_CMD_DELETE_SA 0xF +#define OCTNET_CMD_UPDATE_SA 0x12 + +#define OCTNET_CMD_TNL_RX_CSUM_CTL 0x10 +#define OCTNET_CMD_TNL_TX_CSUM_CTL 0x11 +#define OCTNET_CMD_IPSECV2_AH_ESP_CTL 0x13 +#define OCTNET_CMD_VERBOSE_ENABLE 0x14 +#define OCTNET_CMD_VERBOSE_DISABLE 0x15 + +/* RX(packets coming from wire) Checksum verification flags */ +/* TCP/UDP csum */ +#define CNNIC_L4SUM_VERIFIED 0x1 +#define CNNIC_IPSUM_VERIFIED 0x2 +#define CNNIC_TUN_CSUM_VERIFIED 0x4 +#define CNNIC_CSUM_VERIFIED (CNNIC_IPSUM_VERIFIED | CNNIC_L4SUM_VERIFIED) + +/*LROIPV4 and LROIPV6 Flags*/ +#define OCTNIC_LROIPV4 0x1 +#define OCTNIC_LROIPV6 0x2 + +/* Interface flags communicated between host driver and core app. */ +enum octnet_ifflags { + OCTNET_IFFLAG_PROMISC = 0x01, + OCTNET_IFFLAG_ALLMULTI = 0x02, + OCTNET_IFFLAG_MULTICAST = 0x04, + OCTNET_IFFLAG_BROADCAST = 0x08, + OCTNET_IFFLAG_UNICAST = 0x10 +}; + +/* wqe + * --------------- 0 + * | wqe word0-3 | + * --------------- 32 + * | PCI IH | + * --------------- 40 + * | RPTR | + * --------------- 48 + * | PCI IRH | + * --------------- 56 + * | OCT_NET_CMD | + * --------------- 64 + * | Addtl 8-BData | + * | | + * --------------- + */ + +union octnet_cmd { + u64 u64; + + struct { +#ifdef __BIG_ENDIAN_BITFIELD + u64 cmd:5; + + u64 more:6; /* How many udd words follow the command */ + + u64 param1:29; + + u64 param2:16; + + u64 param3:8; + +#else + + u64 param3:8; + + u64 param2:16; + + u64 param1:29; + + u64 more:6; + + u64 cmd:5; + +#endif + } s; + +}; + +#define OCTNET_CMD_SIZE (sizeof(union octnet_cmd)) + +/** Instruction Header */ +struct octeon_instr_ih { +#ifdef __BIG_ENDIAN_BITFIELD + /** Raw mode indicator 1 = RAW */ + u64 raw:1; + + /** Gather indicator 1=gather*/ + u64 gather:1; + + /** Data length OR no. 
of entries in gather list */ + u64 dlengsz:14; + + /** Front Data size */ + u64 fsz:6; + + /** Packet Order / Work Unit selection (1 of 8)*/ + u64 qos:3; + + /** Core group selection (1 of 16) */ + u64 grp:4; + + /** Short Raw Packet Indicator 1=short raw pkt */ + u64 rs:1; + + /** Tag type */ + u64 tagtype:2; + + /** Tag Value */ + u64 tag:32; +#else + /** Tag Value */ + u64 tag:32; + + /** Tag type */ + u64 tagtype:2; + + /** Short Raw Packet Indicator 1=short raw pkt */ + u64 rs:1; + + /** Core group selection (1 of 16) */ + u64 grp:4; + + /** Packet Order / Work Unit selection (1 of 8)*/ + u64 qos:3; + + /** Front Data size */ + u64 fsz:6; + + /** Data length OR no. of entries in gather list */ + u64 dlengsz:14; + + /** Gather indicator 1=gather*/ + u64 gather:1; + + /** Raw mode indicator 1 = RAW */ + u64 raw:1; +#endif +}; + +/** Input Request Header */ +struct octeon_instr_irh { +#ifdef __BIG_ENDIAN_BITFIELD + u64 opcode:4; + u64 rflag:1; + u64 subcode:7; + u64 len:3; + u64 rid:13; + u64 reserved:4; + u64 ossp:32; /* opcode/subcode specific parameters */ +#else + u64 ossp:32; /* opcode/subcode specific parameters */ + u64 reserved:4; + u64 rid:13; + u64 len:3; + u64 subcode:7; + u64 rflag:1; + u64 opcode:4; +#endif +}; + +/** Return Data Parameters */ +struct octeon_instr_rdp { +#ifdef __BIG_ENDIAN_BITFIELD + u64 reserved:49; + u64 pcie_port:3; + u64 rlen:12; +#else + u64 rlen:12; + u64 pcie_port:3; + u64 reserved:49; +#endif +}; + +/** Receive Header */ +union octeon_rh { +#ifdef __BIG_ENDIAN_BITFIELD + u64 u64; + struct { + u64 opcode:4; + u64 subcode:8; + u64 len:3; /** additional 64-bit words */ + u64 rid:13; /** request id in response to pkt sent by host */ + u64 reserved:4; + u64 ossp:32; /** opcode/subcode specific parameters */ + } r; + struct { + u64 opcode:4; + u64 subcode:8; + u64 len:3; /** additional 64-bit words */ + u64 rid:13; /** request id in response to pkt sent by host */ + u64 extra:24; + u64 link:8; + u64 csum_verified:3; /** checksum verified. */ + u64 has_hwtstamp:1; /** Has hardware timestamp. 1 = yes. */ + } r_dh; + struct { + u64 opcode:4; + u64 subcode:8; + u64 len:3; /** additional 64-bit words */ + u64 rid:13; /** request id in response to pkt sent by host */ + u64 num_gmx_ports:8; + u64 max_nic_ports:8; + u64 app_cap_flags:4; + u64 app_mode:16; + } r_core_drv_init; + struct { + u64 opcode:4; + u64 subcode:8; + u64 len:3; /** additional 64-bit words */ + u64 rid:13; + u64 reserved:4; + u64 extra:25; + u64 ifidx:7; + } r_nic_info; +#else + u64 u64; + struct { + u64 ossp:32; /** opcode/subcode specific parameters */ + u64 reserved:4; + u64 rid:13; /** req id in response to pkt sent by host */ + u64 len:3; /** additional 64-bit words */ + u64 subcode:8; + u64 opcode:4; + } r; + struct { + u64 has_hwtstamp:1; /** 1 = has hwtstamp */ + u64 csum_verified:3; /** checksum verified. 
*/
+		u64 link:8;
+		u64 extra:24;
+		u64 rid:13; /** req id in response to pkt sent by host */
+		u64 len:3; /** additional 64-bit words */
+		u64 subcode:8;
+		u64 opcode:4;
+	} r_dh;
+	struct {
+		u64 app_mode:16;
+		u64 app_cap_flags:4;
+		u64 max_nic_ports:8;
+		u64 num_gmx_ports:8;
+		u64 rid:13;
+		u64 len:3; /** additional 64-bit words */
+		u64 subcode:8;
+		u64 opcode:4;
+	} r_core_drv_init;
+	struct {
+		u64 ifidx:7;
+		u64 extra:25;
+		u64 reserved:4;
+		u64 rid:13;
+		u64 len:3; /** additional 64-bit words */
+		u64 subcode:8;
+		u64 opcode:4;
+	} r_nic_info;
+#endif
+};
+
+#define OCT_RH_SIZE (sizeof(union octeon_rh))
+
+#define OCT_PKT_PARAM_IPV4OPTS 1
+#define OCT_PKT_PARAM_IPV6EXTHDR 2
+
+union octnic_packet_params {
+	u32 u32;
+	struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+		u32 reserved:6;
+		u32 tnl_csum:1;
+		u32 ip_csum:1;
+		u32 ipv4opts_ipv6exthdr:2;
+		u32 ipsec_ops:4;
+		u32 tsflag:1;
+		u32 csoffset:9;
+		u32 ifidx:8;
+#else
+		u32 ifidx:8;
+		u32 csoffset:9;
+		u32 tsflag:1;
+		u32 ipsec_ops:4;
+		u32 ipv4opts_ipv6exthdr:2;
+		u32 ip_csum:1;
+		u32 tnl_csum:1;
+		u32 reserved:6;
+#endif
+	} s;
+};
+
+/** Status of an RGMII Link on Octeon as seen by core driver. */
+union oct_link_status {
+	u64 u64;
+
+	struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+		u64 duplex:8;
+		u64 status:8;
+		u64 mtu:16;
+		u64 speed:16;
+		u64 autoneg:1;
+		u64 interface:4;
+		u64 pause:1;
+		u64 reserved:10;
+#else
+		u64 reserved:10;
+		u64 pause:1;
+		u64 interface:4;
+		u64 autoneg:1;
+		u64 speed:16;
+		u64 mtu:16;
+		u64 status:8;
+		u64 duplex:8;
+#endif
+	} s;
+};
+
+/** Information for an OCTEON ethernet interface shared between core & host. */
+struct oct_link_info {
+	union oct_link_status link;
+	u64 hw_addr;
+
+#ifdef __BIG_ENDIAN_BITFIELD
+	u16 gmxport;
+	u8 rsvd[3];
+	u8 num_txpciq;
+	u8 num_rxpciq;
+	u8 ifidx;
+#else
+	u8 ifidx;
+	u8 num_rxpciq;
+	u8 num_txpciq;
+	u8 rsvd[3];
+	u16 gmxport;
+#endif
+
+	u8 txpciq[MAX_IOQS_PER_NICIF];
+	u8 rxpciq[MAX_IOQS_PER_NICIF];
+};
+
+#define OCT_LINK_INFO_SIZE (sizeof(struct oct_link_info))
+
+struct liquidio_if_cfg_info {
+	u64 ifidx;
+	u64 iqmask; /** mask for IQs enabled for the port */
+	u64 oqmask; /** mask for OQs enabled for the port */
+	struct oct_link_info linfo; /** initial link information */
+};
+
+/** Stats for each NIC port in RX direction. */
+struct nic_rx_stats {
+	/* link-level stats */
+	u64 total_rcvd;
+	u64 bytes_rcvd;
+	u64 total_bcst;
+	u64 total_mcst;
+	u64 runts;
+	u64 ctl_rcvd;
+	u64 fifo_err; /* Accounts for over/under-run of buffers */
+	u64 dmac_drop;
+	u64 fcs_err;
+	u64 jabber_err;
+	u64 l2_err;
+	u64 frame_err;
+
+	/* firmware stats */
+	u64 fw_total_rcvd;
+	u64 fw_total_fwd;
+	u64 fw_err_pko;
+	u64 fw_err_link;
+	u64 fw_err_drop;
+	u64 fw_lro_pkts; /* Number of packets that are LROed */
+	u64 fw_lro_octs; /* Number of octets that are LROed */
+	u64 fw_total_lro; /* Number of LRO packets formed */
+	u64 fw_lro_aborts; /* Number of times LRO of packet aborted */
+	/* intrmod: packet forward rate */
+	u64 fwd_rate;
+};
+
+/** Stats for each NIC port in TX direction.
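+ * Like nic_rx_stats, the link-level MAC counters come first, followed
+ * by counters kept by the firmware.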
*/ +struct nic_tx_stats { + /* link-level stats */ + u64 total_pkts_sent; + u64 total_bytes_sent; + u64 mcast_pkts_sent; + u64 bcast_pkts_sent; + u64 ctl_sent; + u64 one_collision_sent; /* Packets sent after one collision*/ + u64 multi_collision_sent; /* Packets sent after multiple collision*/ + u64 max_collision_fail; /* Packets not sent due to max collisions */ + u64 max_deferral_fail; /* Packets not sent due to max deferrals */ + u64 fifo_err; /* Accounts for over/under-run of buffers */ + u64 runts; + u64 total_collisions; /* Total number of collisions detected */ + + /* firmware stats */ + u64 fw_total_sent; + u64 fw_total_fwd; + u64 fw_err_pko; + u64 fw_err_link; + u64 fw_err_drop; +}; + +struct oct_link_stats { + struct nic_rx_stats fromwire; + struct nic_tx_stats fromhost; + +}; + +#define LIO68XX_LED_CTRL_ADDR 0x3501 +#define LIO68XX_LED_CTRL_CFGON 0x1f +#define LIO68XX_LED_CTRL_CFGOFF 0x100 +#define LIO68XX_LED_BEACON_ADDR 0x3508 +#define LIO68XX_LED_BEACON_CFGON 0x47fd +#define LIO68XX_LED_BEACON_CFGOFF 0x11fc +#define VITESSE_PHY_GPIO_DRIVEON 0x1 +#define VITESSE_PHY_GPIO_CFG 0x8 +#define VITESSE_PHY_GPIO_DRIVEOFF 0x4 +#define VITESSE_PHY_GPIO_HIGH 0x2 +#define VITESSE_PHY_GPIO_LOW 0x3 + +struct oct_mdio_cmd { + u64 op; + u64 mdio_addr; + u64 value1; + u64 value2; + u64 value3; +}; + +#define OCT_LINK_STATS_SIZE (sizeof(struct oct_link_stats)) + +#define LIO_INTRMOD_CHECK_INTERVAL 1 +#define LIO_INTRMOD_MAXPKT_RATETHR 196608 /* max pkt rate threshold */ +#define LIO_INTRMOD_MINPKT_RATETHR 9216 /* min pkt rate threshold */ +#define LIO_INTRMOD_MAXCNT_TRIGGER 384 /* max pkts to trigger interrupt */ +#define LIO_INTRMOD_MINCNT_TRIGGER 1 /* min pkts to trigger interrupt */ +#define LIO_INTRMOD_MAXTMR_TRIGGER 128 /* max time to trigger interrupt */ +#define LIO_INTRMOD_MINTMR_TRIGGER 32 /* min time to trigger interrupt */ + +struct oct_intrmod_cfg { + u64 intrmod_enable; + u64 intrmod_check_intrvl; + u64 intrmod_maxpkt_ratethr; + u64 intrmod_minpkt_ratethr; + u64 intrmod_maxcnt_trigger; + u64 intrmod_maxtmr_trigger; + u64 intrmod_mincnt_trigger; + u64 intrmod_mintmr_trigger; +}; + +#define BASE_QUEUE_NOT_REQUESTED 65535 + +union oct_nic_if_cfg { + u64 u64; + struct { +#ifdef __BIG_ENDIAN_BITFIELD + u64 base_queue:16; + u64 num_iqueues:16; + u64 num_oqueues:16; + u64 gmx_port_id:8; + u64 reserved:8; +#else + u64 reserved:8; + u64 gmx_port_id:8; + u64 num_oqueues:16; + u64 num_iqueues:16; + u64 base_queue:16; +#endif + } s; +}; + +#endif diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_image.h b/drivers/net/ethernet/cavium/liquidio/liquidio_image.h new file mode 100644 index 000000000000..13c680bf0556 --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/liquidio_image.h @@ -0,0 +1,57 @@ +/********************************************************************** +* Author: Cavium, Inc. +* +* Contact: support@cavium.com +* Please include "LiquidIO" in the subject. +* +* Copyright (c) 2003-2015 Cavium, Inc. +* +* This file is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License, Version 2, as +* published by the Free Software Foundation. +* +* This file is distributed in the hope that it will be useful, but +* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty +* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or +* NONINFRINGEMENT. See the GNU General Public License for more +* details. +* +* This file may also be available under a different license from Cavium. +* Contact Cavium, Inc. 
for more information +**********************************************************************/ +#ifndef _LIQUIDIO_IMAGE_H_ +#define _LIQUIDIO_IMAGE_H_ + +#define LIO_MAX_FW_TYPE_LEN (8) +#define LIO_MAX_FW_FILENAME_LEN (256) +#define LIO_FW_DIR "liquidio/" +#define LIO_FW_BASE_NAME "lio_" +#define LIO_FW_NAME_SUFFIX ".bin" +#define LIO_FW_NAME_TYPE_NIC "nic" +#define LIO_FW_NAME_TYPE_NONE "none" +#define LIO_MAX_FIRMWARE_VERSION_LEN 16 + +#define LIO_MAX_BOOTCMD_LEN 1024 +#define LIO_MAX_IMAGES 16 +#define LIO_NIC_MAGIC 0x434E4943 /* "CNIC" */ +struct octeon_firmware_desc { + u64 addr; + u32 len; + u32 crc32; /* crc32 of image */ +}; + +/* Following the header is a list of 64-bit aligned binary images, + * as described by the desc field. + * Numeric fields are in network byte order. + */ +struct octeon_firmware_file_header { + u32 magic; + char version[LIO_MAX_FIRMWARE_VERSION_LEN]; + char bootcmd[LIO_MAX_BOOTCMD_LEN]; + u32 num_images; + struct octeon_firmware_desc desc[LIO_MAX_IMAGES]; + u32 pad; + u32 crc32; /* header checksum */ +}; + +#endif /* _LIQUIDIO_IMAGE_H_ */ diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_config.h b/drivers/net/ethernet/cavium/liquidio/octeon_config.h new file mode 100644 index 000000000000..62a8dd5cd3dc --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/octeon_config.h @@ -0,0 +1,424 @@ +/********************************************************************** +* Author: Cavium, Inc. +* +* Contact: support@cavium.com +* Please include "LiquidIO" in the subject. +* +* Copyright (c) 2003-2015 Cavium, Inc. +* +* This file is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License, Version 2, as +* published by the Free Software Foundation. +* +* This file is distributed in the hope that it will be useful, but +* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty +* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or +* NONINFRINGEMENT. See the GNU General Public License for more +* details. +* +* This file may also be available under a different license from Cavium. +* Contact Cavium, Inc. for more information +**********************************************************************/ + +/*! \file octeon_config.h + * \brief Host Driver: Configuration data structures for the host driver. + */ + +#ifndef __OCTEON_CONFIG_H__ +#define __OCTEON_CONFIG_H__ + +/*--------------------------CONFIG VALUES------------------------*/ + +/* The following macros affect the way the driver data structures + * are generated for Octeon devices. + * They can be modified. + */ + +/* Maximum octeon devices defined as MAX_OCTEON_NICIF to support + * multiple(<= MAX_OCTEON_NICIF) Miniports + */ +#define MAX_OCTEON_NICIF 32 +#define MAX_OCTEON_DEVICES MAX_OCTEON_NICIF +#define MAX_OCTEON_LINKS MAX_OCTEON_NICIF +#define MAX_OCTEON_MULTICAST_ADDR 32 + +/* CN6xxx IQ configuration macros */ +#define CN6XXX_MAX_INPUT_QUEUES 32 +#define CN6XXX_MAX_IQ_DESCRIPTORS 2048 +#define CN6XXX_DB_MIN 1 +#define CN6XXX_DB_MAX 8 +#define CN6XXX_DB_TIMEOUT 1 + +/* CN6xxx OQ configuration macros */ +#define CN6XXX_MAX_OUTPUT_QUEUES 32 +#define CN6XXX_MAX_OQ_DESCRIPTORS 2048 +#define CN6XXX_OQ_BUF_SIZE 1536 +#define CN6XXX_OQ_PKTSPER_INTR ((CN6XXX_MAX_OQ_DESCRIPTORS < 512) ? \ + (CN6XXX_MAX_OQ_DESCRIPTORS / 4) : 128) +#define CN6XXX_OQ_REFIL_THRESHOLD ((CN6XXX_MAX_OQ_DESCRIPTORS < 512) ? 
\ + (CN6XXX_MAX_OQ_DESCRIPTORS / 4) : 128) + +#define CN6XXX_OQ_INTR_PKT 64 +#define CN6XXX_OQ_INTR_TIME 100 +#define DEFAULT_NUM_NIC_PORTS_66XX 2 +#define DEFAULT_NUM_NIC_PORTS_68XX 4 +#define DEFAULT_NUM_NIC_PORTS_68XX_210NV 2 + +/* common OCTEON configuration macros */ +#define CN6XXX_CFG_IO_QUEUES 32 +#define OCTEON_32BYTE_INSTR 32 +#define OCTEON_64BYTE_INSTR 64 +#define OCTEON_MAX_BASE_IOQ 4 +#define OCTEON_OQ_BUFPTR_MODE 0 +#define OCTEON_OQ_INFOPTR_MODE 1 + +#define OCTEON_DMA_INTR_PKT 64 +#define OCTEON_DMA_INTR_TIME 1000 + +#define MAX_TXQS_PER_INTF 8 +#define MAX_RXQS_PER_INTF 8 +#define DEF_TXQS_PER_INTF 4 +#define DEF_RXQS_PER_INTF 4 + +#define INVALID_IOQ_NO 0xff + +#define DEFAULT_POW_GRP 0 + +/* Macros to get octeon config params */ +#define CFG_GET_IQ_CFG(cfg) ((cfg)->iq) +#define CFG_GET_IQ_MAX_Q(cfg) ((cfg)->iq.max_iqs) +#define CFG_GET_IQ_PENDING_LIST_SIZE(cfg) ((cfg)->iq.pending_list_size) +#define CFG_GET_IQ_INSTR_TYPE(cfg) ((cfg)->iq.instr_type) +#define CFG_GET_IQ_DB_MIN(cfg) ((cfg)->iq.db_min) +#define CFG_GET_IQ_DB_TIMEOUT(cfg) ((cfg)->iq.db_timeout) + +#define CFG_GET_OQ_MAX_Q(cfg) ((cfg)->oq.max_oqs) +#define CFG_GET_OQ_INFO_PTR(cfg) ((cfg)->oq.info_ptr) +#define CFG_GET_OQ_PKTS_PER_INTR(cfg) ((cfg)->oq.pkts_per_intr) +#define CFG_GET_OQ_REFILL_THRESHOLD(cfg) ((cfg)->oq.refill_threshold) +#define CFG_GET_OQ_INTR_PKT(cfg) ((cfg)->oq.oq_intr_pkt) +#define CFG_GET_OQ_INTR_TIME(cfg) ((cfg)->oq.oq_intr_time) +#define CFG_SET_OQ_INTR_PKT(cfg, val) (cfg)->oq.oq_intr_pkt = val +#define CFG_SET_OQ_INTR_TIME(cfg, val) (cfg)->oq.oq_intr_time = val + +#define CFG_GET_DMA_INTR_PKT(cfg) ((cfg)->dma.dma_intr_pkt) +#define CFG_GET_DMA_INTR_TIME(cfg) ((cfg)->dma.dma_intr_time) +#define CFG_GET_NUM_NIC_PORTS(cfg) ((cfg)->num_nic_ports) +#define CFG_GET_NUM_DEF_TX_DESCS(cfg) ((cfg)->num_def_tx_descs) +#define CFG_GET_NUM_DEF_RX_DESCS(cfg) ((cfg)->num_def_rx_descs) +#define CFG_GET_DEF_RX_BUF_SIZE(cfg) ((cfg)->def_rx_buf_size) + +#define CFG_GET_MAX_TXQS_NIC_IF(cfg, idx) \ + ((cfg)->nic_if_cfg[idx].max_txqs) +#define CFG_GET_NUM_TXQS_NIC_IF(cfg, idx) \ + ((cfg)->nic_if_cfg[idx].num_txqs) +#define CFG_GET_MAX_RXQS_NIC_IF(cfg, idx) \ + ((cfg)->nic_if_cfg[idx].max_rxqs) +#define CFG_GET_NUM_RXQS_NIC_IF(cfg, idx) \ + ((cfg)->nic_if_cfg[idx].num_rxqs) +#define CFG_GET_NUM_RX_DESCS_NIC_IF(cfg, idx) \ + ((cfg)->nic_if_cfg[idx].num_rx_descs) +#define CFG_GET_NUM_TX_DESCS_NIC_IF(cfg, idx) \ + ((cfg)->nic_if_cfg[idx].num_tx_descs) +#define CFG_GET_NUM_RX_BUF_SIZE_NIC_IF(cfg, idx) \ + ((cfg)->nic_if_cfg[idx].rx_buf_size) +#define CFG_GET_BASE_QUE_NIC_IF(cfg, idx) \ + ((cfg)->nic_if_cfg[idx].base_queue) +#define CFG_GET_GMXID_NIC_IF(cfg, idx) \ + ((cfg)->nic_if_cfg[idx].gmx_port_id) + +#define CFG_GET_CTRL_Q_GRP(cfg) ((cfg)->misc.ctrlq_grp) +#define CFG_GET_HOST_LINK_QUERY_INTERVAL(cfg) \ + ((cfg)->misc.host_link_query_interval) +#define CFG_GET_OCT_LINK_QUERY_INTERVAL(cfg) \ + ((cfg)->misc.oct_link_query_interval) +#define CFG_GET_IS_SLI_BP_ON(cfg) ((cfg)->misc.enable_sli_oq_bp) + +/* Max IOQs per OCTEON Link */ +#define MAX_IOQS_PER_NICIF 32 + +enum lio_card_type { + LIO_210SV = 0, /* Two port, 66xx */ + LIO_210NV, /* Two port, 68xx */ + LIO_410NV /* Four port, 68xx */ +}; + +#define LIO_210SV_NAME "210sv" +#define LIO_210NV_NAME "210nv" +#define LIO_410NV_NAME "410nv" + +/** Structure to define the configuration attributes for each Input queue. 
+ * Applicable to all Octeon processors
+ **/
+struct octeon_iq_config {
+#ifdef __BIG_ENDIAN_BITFIELD
+	u64 reserved:32;
+
+	/** Minimum ticks to wait before checking for pending instructions. */
+	u64 db_timeout:16;
+
+	/** Minimum number of commands pending to be posted to Octeon
+	 * before driver hits the Input queue doorbell.
+	 */
+	u64 db_min:8;
+
+	/** Command size - 32 or 64 bytes */
+	u64 instr_type:32;
+
+	/** Pending list size (usually set to the sum of the size of all Input
+	 * queues)
+	 */
+	u64 pending_list_size:32;
+
+	/* Max number of IQs available */
+	u64 max_iqs:8;
+#else
+	/* Max number of IQs available */
+	u64 max_iqs:8;
+
+	/** Pending list size (usually set to the sum of the size of all Input
+	 * queues)
+	 */
+	u64 pending_list_size:32;
+
+	/** Command size - 32 or 64 bytes */
+	u64 instr_type:32;
+
+	/** Minimum number of commands pending to be posted to Octeon
+	 * before driver hits the Input queue doorbell.
+	 */
+	u64 db_min:8;
+
+	/** Minimum ticks to wait before checking for pending instructions. */
+	u64 db_timeout:16;
+
+	u64 reserved:32;
+#endif
+};
+
+/** Structure to define the configuration attributes for each Output queue.
+ * Applicable to all Octeon processors
+ **/
+struct octeon_oq_config {
+#ifdef __BIG_ENDIAN_BITFIELD
+	u64 reserved:16;
+
+	u64 pkts_per_intr:16;
+
+	/** Interrupt Coalescing (Time Interval). Octeon will interrupt the
+	 * host if at least one packet was sent in the time interval specified
+	 * by this field. The driver uses time interval interrupt coalescing
+	 * by default. The time is specified in microseconds.
+	 */
+	u64 oq_intr_time:16;
+
+	/** Interrupt Coalescing (Packet Count). Octeon will interrupt the host
+	 * only if it sent as many packets as specified by this field.
+	 * The driver usually does not use packet count interrupt coalescing.
+	 */
+	u64 oq_intr_pkt:16;
+
+	/** The number of buffers that were consumed during packet processing
+	 * by the driver on this Output queue before the driver attempts to
+	 * replenish the descriptor ring with new buffers.
+	 */
+	u64 refill_threshold:16;
+
+	/** If set, the Output queue uses info-pointer mode. (Default: 1) */
+	u64 info_ptr:32;
+
+	/* Max number of OQs available */
+	u64 max_oqs:8;
+
+#else
+	/* Max number of OQs available */
+	u64 max_oqs:8;
+
+	/** If set, the Output queue uses info-pointer mode. (Default: 1) */
+	u64 info_ptr:32;
+
+	/** The number of buffers that were consumed during packet processing
+	 * by the driver on this Output queue before the driver attempts to
+	 * replenish the descriptor ring with new buffers.
+	 */
+	u64 refill_threshold:16;
+
+	/** Interrupt Coalescing (Packet Count). Octeon will interrupt the host
+	 * only if it sent as many packets as specified by this field.
+	 * The driver usually does not use packet count interrupt coalescing.
+	 */
+	u64 oq_intr_pkt:16;
+
+	/** Interrupt Coalescing (Time Interval). Octeon will interrupt the
+	 * host if at least one packet was sent in the time interval specified
+	 * by this field. The driver uses time interval interrupt coalescing
+	 * by default. The time is specified in microseconds.
+	 */
+	u64 oq_intr_time:16;
+
+	u64 pkts_per_intr:16;
+
+	u64 reserved:16;
+#endif
+
+};
+
+/** This structure contains the NIC link configuration attributes,
+ * common for all the OCTEON Models.
+ */
+struct octeon_nic_if_config {
+#ifdef __BIG_ENDIAN_BITFIELD
+	u64 reserved:56;
+
+	u64 base_queue:16;
+
+	u64 gmx_port_id:8;
+
+	/* SKB size. We need not change the buffer size even for jumbo frames;
+	 * Octeon can send a jumbo frame in 4 consecutive descriptors.
+	 */
+	u64 rx_buf_size:16;
+
+	/* Num of desc for tx rings */
+	u64 num_tx_descs:16;
+
+	/* Num of desc for rx rings */
+	u64 num_rx_descs:16;
+
+	/* Actual configured value. Range could be: 1...max_rxqs */
+	u64 num_rxqs:16;
+
+	/* Max Rxqs: half for each of the two ports: max_oq/2 */
+	u64 max_rxqs:16;
+
+	/* Actual configured value. Range could be: 1...max_txqs */
+	u64 num_txqs:16;
+
+	/* Max Txqs: half for each of the two ports: max_iq/2 */
+	u64 max_txqs:16;
+#else
+	/* Max Txqs: half for each of the two ports: max_iq/2 */
+	u64 max_txqs:16;
+
+	/* Actual configured value. Range could be: 1...max_txqs */
+	u64 num_txqs:16;
+
+	/* Max Rxqs: half for each of the two ports: max_oq/2 */
+	u64 max_rxqs:16;
+
+	/* Actual configured value. Range could be: 1...max_rxqs */
+	u64 num_rxqs:16;
+
+	/* Num of desc for rx rings */
+	u64 num_rx_descs:16;
+
+	/* Num of desc for tx rings */
+	u64 num_tx_descs:16;
+
+	/* SKB size. We need not change the buffer size even for jumbo frames;
+	 * Octeon can send a jumbo frame in 4 consecutive descriptors.
+	 */
+	u64 rx_buf_size:16;
+
+	u64 gmx_port_id:8;
+
+	u64 base_queue:16;
+
+	u64 reserved:56;
+#endif
+
+};
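+
+/* These per-interface attributes are read through the CFG_GET_* accessors
+ * defined earlier in this file, e.g.:
+ *
+ *	int num_iqueues =
+ *		CFG_GET_NUM_TXQS_NIC_IF(octeon_get_conf(oct), i);
+ *	unsigned int base_queue =
+ *		CFG_GET_BASE_QUE_NIC_IF(octeon_get_conf(oct), i);
+ *
+ * which is how setup_nic_devices() sizes the queue set it requests from
+ * the firmware for interface i.
+ */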
+
+/** Structure to define the configuration attributes for metadata.
+ * Applicable to all Octeon processors.
+ */
+
+struct octeon_misc_config {
+#ifdef __BIG_ENDIAN_BITFIELD
+	/** Host link status polling period */
+	u64 host_link_query_interval:32;
+	/** Oct link status polling period */
+	u64 oct_link_query_interval:32;
+
+	u64 enable_sli_oq_bp:1;
+	/** Control IQ Group */
+	u64 ctrlq_grp:4;
+#else
+	/** Control IQ Group */
+	u64 ctrlq_grp:4;
+	/** BP for SLI OQ */
+	u64 enable_sli_oq_bp:1;
+	/** Oct link status polling period */
+	u64 oct_link_query_interval:32;
+	/** Host link status polling period */
+	u64 host_link_query_interval:32;
+#endif
+};
+
+/** Structure to define the configuration for all OCTEON processors. */
+struct octeon_config {
+	u16 card_type;
+	char *card_name;
+
+	/** Input Queue attributes. */
+	struct octeon_iq_config iq;
+
+	/** Output Queue attributes. */
+	struct octeon_oq_config oq;
+
+	/** NIC Port Configuration */
+	struct octeon_nic_if_config nic_if_cfg[MAX_OCTEON_NICIF];
+
+	/** Miscellaneous attributes */
+	struct octeon_misc_config misc;
+
+	int num_nic_ports;
+
+	int num_def_tx_descs;
+
+	/* Num of desc for rx rings */
+	int num_def_rx_descs;
+
+	int def_rx_buf_size;
+
+};
+
+/* The following config values are fixed and should not be modified. */
+
+/* Maximum address space to be mapped for Octeon's BAR1 index-based access. */
+#define MAX_BAR1_MAP_INDEX 2
+#define OCTEON_BAR1_ENTRY_SIZE (4 * 1024 * 1024)
+
+/* BAR1 Index 0 to (MAX_BAR1_MAP_INDEX - 1) for normal mapped memory access.
+ * Bar1 register at MAX_BAR1_MAP_INDEX used by driver for dynamic access.
+ */
+#define MAX_BAR1_IOREMAP_SIZE ((MAX_BAR1_MAP_INDEX + 1) * \
+			       OCTEON_BAR1_ENTRY_SIZE)
+
+/* Response lists - 1 ordered, 1 unordered-blocking, 1 unordered-nonblocking
+ * NoResponse Lists are now maintained with each IQ. (Dec' 2007).
+ */
+#define MAX_RESPONSE_LISTS 4
+
+/* Opcode hash bits. The opcode is hashed on the lower 6 bits to look up the
+ * dispatch table.
+ */
+#define OPCODE_MASK_BITS 6
+
+/* Mask for the 6-bit lookup hash */
+#define OCTEON_OPCODE_MASK 0x3f
+
+/* Size of the dispatch table.
The 6-bit hash can index into 2^6 entries */ +#define DISPATCH_LIST_SIZE BIT(OPCODE_MASK_BITS) + +/* Maximum number of Octeon Instruction (command) queues */ +#define MAX_OCTEON_INSTR_QUEUES CN6XXX_MAX_INPUT_QUEUES + +/* Maximum number of Octeon Instruction (command) queues */ +#define MAX_OCTEON_OUTPUT_QUEUES CN6XXX_MAX_OUTPUT_QUEUES + +#endif /* __OCTEON_CONFIG_H__ */ diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_console.c b/drivers/net/ethernet/cavium/liquidio/octeon_console.c new file mode 100644 index 000000000000..466147e409c9 --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/octeon_console.c @@ -0,0 +1,723 @@ +/********************************************************************** +* Author: Cavium, Inc. +* +* Contact: support@cavium.com +* Please include "LiquidIO" in the subject. +* +* Copyright (c) 2003-2015 Cavium, Inc. +* +* This file is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License, Version 2, as +* published by the Free Software Foundation. +* +* This file is distributed in the hope that it will be useful, but +* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty +* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or +* NONINFRINGEMENT. See the GNU General Public License for more +* details. +* +* This file may also be available under a different license from Cavium. +* Contact Cavium, Inc. for more information +**********************************************************************/ + +/** + * @file octeon_console.c + */ +#include <linux/version.h> +#include <linux/types.h> +#include <linux/list.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/kthread.h> +#include <linux/netdevice.h> +#include "octeon_config.h" +#include "liquidio_common.h" +#include "octeon_droq.h" +#include "octeon_iq.h" +#include "response_manager.h" +#include "octeon_device.h" +#include "octeon_nic.h" +#include "octeon_main.h" +#include "octeon_network.h" +#include "cn66xx_regs.h" +#include "cn66xx_device.h" +#include "cn68xx_regs.h" +#include "cn68xx_device.h" +#include "liquidio_image.h" +#include "octeon_mem_ops.h" + +static void octeon_remote_lock(void); +static void octeon_remote_unlock(void); +static u64 cvmx_bootmem_phy_named_block_find(struct octeon_device *oct, + const char *name, + u32 flags); + +#define MIN(a, b) min((a), (b)) +#define CAST_ULL(v) ((u64)(v)) + +#define BOOTLOADER_PCI_READ_BUFFER_DATA_ADDR 0x0006c008 +#define BOOTLOADER_PCI_READ_BUFFER_LEN_ADDR 0x0006c004 +#define BOOTLOADER_PCI_READ_BUFFER_OWNER_ADDR 0x0006c000 +#define BOOTLOADER_PCI_READ_DESC_ADDR 0x0006c100 +#define BOOTLOADER_PCI_WRITE_BUFFER_STR_LEN 248 + +#define OCTEON_PCI_IO_BUF_OWNER_OCTEON 0x00000001 +#define OCTEON_PCI_IO_BUF_OWNER_HOST 0x00000002 + +/** Can change without breaking ABI */ +#define CVMX_BOOTMEM_NUM_NAMED_BLOCKS 64 + +/** minimum alignment of bootmem alloced blocks */ +#define CVMX_BOOTMEM_ALIGNMENT_SIZE (16ull) + +/** CVMX bootmem descriptor major version */ +#define CVMX_BOOTMEM_DESC_MAJ_VER 3 +/* CVMX bootmem descriptor minor version */ +#define CVMX_BOOTMEM_DESC_MIN_VER 0 + +/* Current versions */ +#define OCTEON_PCI_CONSOLE_MAJOR_VERSION 1 +#define OCTEON_PCI_CONSOLE_MINOR_VERSION 0 +#define OCTEON_PCI_CONSOLE_BLOCK_NAME "__pci_console" +#define OCTEON_CONSOLE_POLL_INTERVAL_MS 100 /* 10 times per second */ + +/* First three members of cvmx_bootmem_desc are left in original +** positions for backwards compatibility. 
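+** The host driver does not dereference the structure directly; each field
+** is fetched from Octeon memory through CVMX_BOOTMEM_DESC_GET_FIELD().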
+** Assumes big endian target
+*/
+struct cvmx_bootmem_desc {
+	/** spinlock to control access to list */
+	u32 lock;
+
+	/** flags for indicating various conditions */
+	u32 flags;
+
+	u64 head_addr;
+
+	/** incremented when incompatible changes are made */
+	u32 major_version;
+
+	/** incremented when compatible changes are made,
+	 * reset to zero when major incremented
+	 */
+	u32 minor_version;
+
+	u64 app_data_addr;
+	u64 app_data_size;
+
+	/** number of elements in named blocks array */
+	u32 nb_num_blocks;
+
+	/** length of name array in bootmem blocks */
+	u32 named_block_name_len;
+
+	/** address of named memory block descriptors */
+	u64 named_block_array_addr;
+};
+
+/* Structure that defines a single console.
+ *
+ * Note: when read_index == write_index, the buffer is empty.
+ * The actual usable size of each console is console_buf_size - 1.
+ */
+struct octeon_pci_console {
+	u64 input_base_addr;
+	u32 input_read_index;
+	u32 input_write_index;
+	u64 output_base_addr;
+	u32 output_read_index;
+	u32 output_write_index;
+	u32 lock;
+	u32 buf_size;
+};
+
+/* This is the main container structure that contains all the information
+ * about all PCI consoles. The address of this structure is passed to various
+ * routines that operate on PCI consoles.
+ */
+struct octeon_pci_console_desc {
+	u32 major_version;
+	u32 minor_version;
+	u32 lock;
+	u32 flags;
+	u32 num_consoles;
+	u32 pad;
+	/* must be 64 bit aligned here... */
+	/* Array of addresses of octeon_pci_console structures */
+	u64 console_addr_array[0];
+	/* Implicit storage for console_addr_array */
+};
+
+/**
+ * This macro returns the size of a member of a structure.
+ * Logically it is the same as "sizeof(s::field)" in C++, but
+ * C lacks the "::" operator.
+ */
+#define SIZEOF_FIELD(s, field) sizeof(((s *)NULL)->field)
+
+/**
+ * This macro returns a member of the cvmx_bootmem_desc
+ * structure. These members can't be directly addressed as
+ * they might be in memory not directly reachable. In the case
+ * where bootmem is compiled with LINUX_HOST, the structure
+ * itself might be located on a remote Octeon. The argument
+ * "field" is the member name of the cvmx_bootmem_desc to read.
+ * Regardless of the type of the field, the return type is always
+ * a u64.
+ */
+#define CVMX_BOOTMEM_DESC_GET_FIELD(oct, field) \
+	__cvmx_bootmem_desc_get(oct, oct->bootmem_desc_addr, \
+				offsetof(struct cvmx_bootmem_desc, field), \
+				SIZEOF_FIELD(struct cvmx_bootmem_desc, field))
+
+#define __cvmx_bootmem_lock(flags)
+#define __cvmx_bootmem_unlock(flags)
+
+/**
+ * This macro returns a member of the
+ * cvmx_bootmem_named_block_desc structure. These members can't
+ * be directly addressed as they might be in memory not directly
+ * reachable. In the case where bootmem is compiled with
+ * LINUX_HOST, the structure itself might be located on a remote
+ * Octeon. The argument "field" is the member name of the
+ * cvmx_bootmem_named_block_desc to read. Regardless of the type
+ * of the field, the return type is always a u64. The "addr"
+ * parameter is the physical address of the structure.
+ */
+#define CVMX_BOOTMEM_NAMED_GET_FIELD(oct, addr, field) \
+	__cvmx_bootmem_desc_get(oct, addr, \
+		offsetof(struct cvmx_bootmem_named_block_desc, field), \
+		SIZEOF_FIELD(struct cvmx_bootmem_named_block_desc, field))
+
+/**
+ * This function is the implementation of the get macros defined
+ * for individual structure members. The arguments are generated
+ * by the macros in order to read only the needed memory.
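+ * For example, CVMX_BOOTMEM_DESC_GET_FIELD(oct, minor_version) becomes a
+ * 4-byte read of Octeon memory at bootmem_desc_addr +
+ * offsetof(struct cvmx_bootmem_desc, minor_version).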
+ * + * @param oct Pointer to current octeon device + * @param base 64bit physical address of the complete structure + * @param offset Offset from the beginning of the structure to the member being + * accessed. + * @param size Size of the structure member. + * + * @return Value of the structure member promoted into a u64. + */ +static inline u64 __cvmx_bootmem_desc_get(struct octeon_device *oct, + u64 base, + u32 offset, + u32 size) +{ + base = (1ull << 63) | (base + offset); + switch (size) { + case 4: + return octeon_read_device_mem32(oct, base); + case 8: + return octeon_read_device_mem64(oct, base); + default: + return 0; + } +} + +/** + * This function retrieves the string name of a named block. It is + * more complicated than a simple memcpy() since the named block + * descriptor may not be directly accessible. + * + * @param addr Physical address of the named block descriptor + * @param str String to receive the named block string name + * @param len Length of the string buffer, which must match the length + * stored in the bootmem descriptor. + */ +static void CVMX_BOOTMEM_NAMED_GET_NAME(struct octeon_device *oct, + u64 addr, + char *str, + u32 len) +{ + addr += offsetof(struct cvmx_bootmem_named_block_desc, name); + octeon_pci_read_core_mem(oct, addr, str, len); + str[len] = 0; +} + +/* See header file for descriptions of functions */ + +/** + * Check the version information on the bootmem descriptor + * + * @param exact_match + * Exact major version to check against. A zero means + * check that the version supports named blocks. + * + * @return Zero if the version is correct. Negative if the version is + * incorrect. Failures also cause a message to be displayed. + */ +static int __cvmx_bootmem_check_version(struct octeon_device *oct, + u32 exact_match) +{ + u32 major_version; + u32 minor_version; + + if (!oct->bootmem_desc_addr) + oct->bootmem_desc_addr = + octeon_read_device_mem64(oct, + BOOTLOADER_PCI_READ_DESC_ADDR); + major_version = + (u32)CVMX_BOOTMEM_DESC_GET_FIELD(oct, major_version); + minor_version = + (u32)CVMX_BOOTMEM_DESC_GET_FIELD(oct, minor_version); + dev_dbg(&oct->pci_dev->dev, "%s: major_version=%d\n", __func__, + major_version); + if ((major_version > 3) || + (exact_match && major_version != exact_match)) { + dev_err(&oct->pci_dev->dev, "bootmem ver mismatch %d.%d addr:0x%llx\n", + major_version, minor_version, + CAST_ULL(oct->bootmem_desc_addr)); + return -1; + } else { + return 0; + } +} + +static const struct cvmx_bootmem_named_block_desc +*__cvmx_bootmem_find_named_block_flags(struct octeon_device *oct, + const char *name, u32 flags) +{ + struct cvmx_bootmem_named_block_desc *desc = + &oct->bootmem_named_block_desc; + u64 named_addr = cvmx_bootmem_phy_named_block_find(oct, name, flags); + + if (named_addr) { + desc->base_addr = CVMX_BOOTMEM_NAMED_GET_FIELD(oct, named_addr, + base_addr); + desc->size = + CVMX_BOOTMEM_NAMED_GET_FIELD(oct, named_addr, size); + strncpy(desc->name, name, sizeof(desc->name)); + desc->name[sizeof(desc->name) - 1] = 0; + return &oct->bootmem_named_block_desc; + } else { + return NULL; + } +} + +static u64 cvmx_bootmem_phy_named_block_find(struct octeon_device *oct, + const char *name, + u32 flags) +{ + u64 result = 0; + + __cvmx_bootmem_lock(flags); + if (!__cvmx_bootmem_check_version(oct, 3)) { + u32 i; + u64 named_block_array_addr = + CVMX_BOOTMEM_DESC_GET_FIELD(oct, + named_block_array_addr); + u32 num_blocks = (u32) + CVMX_BOOTMEM_DESC_GET_FIELD(oct, nb_num_blocks); + u32 name_length = (u32) + CVMX_BOOTMEM_DESC_GET_FIELD(oct, 
named_block_name_len); + u64 named_addr = named_block_array_addr; + + for (i = 0; i < num_blocks; i++) { + u64 named_size = + CVMX_BOOTMEM_NAMED_GET_FIELD(oct, named_addr, + size); + if (name && named_size) { + char *name_tmp = + kmalloc(name_length + 1, GFP_KERNEL); + CVMX_BOOTMEM_NAMED_GET_NAME(oct, named_addr, + name_tmp, + name_length); + if (!strncmp(name, name_tmp, name_length)) { + result = named_addr; + kfree(name_tmp); + break; + } + kfree(name_tmp); + } else if (!name && !named_size) { + result = named_addr; + break; + } + + named_addr += + sizeof(struct cvmx_bootmem_named_block_desc); + } + } + __cvmx_bootmem_unlock(flags); + return result; +} + +/** + * Find a named block on the remote Octeon + * + * @param name Name of block to find + * @param base_addr Address the block is at (OUTPUT) + * @param size The size of the block (OUTPUT) + * + * @return Zero on success, One on failure. + */ +static int octeon_named_block_find(struct octeon_device *oct, const char *name, + u64 *base_addr, u64 *size) +{ + const struct cvmx_bootmem_named_block_desc *named_block; + + octeon_remote_lock(); + named_block = __cvmx_bootmem_find_named_block_flags(oct, name, 0); + octeon_remote_unlock(); + if (named_block) { + *base_addr = named_block->base_addr; + *size = named_block->size; + return 0; + } + return 1; +} + +static void octeon_remote_lock(void) +{ + /* fill this in if any sharing is needed */ +} + +static void octeon_remote_unlock(void) +{ + /* fill this in if any sharing is needed */ +} + +int octeon_console_send_cmd(struct octeon_device *oct, char *cmd_str, + u32 wait_hundredths) +{ + u32 len = strlen(cmd_str); + + dev_dbg(&oct->pci_dev->dev, "sending \"%s\" to bootloader\n", cmd_str); + + if (len > BOOTLOADER_PCI_WRITE_BUFFER_STR_LEN - 1) { + dev_err(&oct->pci_dev->dev, "Command string too long, max length is: %d\n", + BOOTLOADER_PCI_WRITE_BUFFER_STR_LEN - 1); + return -1; + } + + if (octeon_wait_for_bootloader(oct, wait_hundredths) != 0) { + dev_err(&oct->pci_dev->dev, "Bootloader not ready for command.\n"); + return -1; + } + + /* Write command to bootloader */ + octeon_remote_lock(); + octeon_pci_write_core_mem(oct, BOOTLOADER_PCI_READ_BUFFER_DATA_ADDR, + (u8 *)cmd_str, len); + octeon_write_device_mem32(oct, BOOTLOADER_PCI_READ_BUFFER_LEN_ADDR, + len); + octeon_write_device_mem32(oct, BOOTLOADER_PCI_READ_BUFFER_OWNER_ADDR, + OCTEON_PCI_IO_BUF_OWNER_OCTEON); + + /* Bootloader should accept command very quickly + * if it really was ready + */ + if (octeon_wait_for_bootloader(oct, 200) != 0) { + octeon_remote_unlock(); + dev_err(&oct->pci_dev->dev, "Bootloader did not accept command.\n"); + return -1; + } + octeon_remote_unlock(); + return 0; +} + +int octeon_wait_for_bootloader(struct octeon_device *oct, + u32 wait_time_hundredths) +{ + dev_dbg(&oct->pci_dev->dev, "waiting %d0 ms for bootloader\n", + wait_time_hundredths); + + if (octeon_mem_access_ok(oct)) + return -1; + + while (wait_time_hundredths > 0 && + octeon_read_device_mem32(oct, + BOOTLOADER_PCI_READ_BUFFER_OWNER_ADDR) + != OCTEON_PCI_IO_BUF_OWNER_HOST) { + if (--wait_time_hundredths <= 0) + return -1; + schedule_timeout_uninterruptible(HZ / 100); + } + return 0; +} + +static void octeon_console_handle_result(struct octeon_device *oct, + size_t console_num, + char *buffer, s32 bytes_read) +{ + struct octeon_console *console; + + console = &oct->console[console_num]; + + console->waiting = 0; +} + +static char console_buffer[OCTEON_CONSOLE_MAX_READ_BYTES]; + +static void output_console_line(struct octeon_device *oct, + struct 
octeon_console *console, + size_t console_num, + char *console_buffer, + s32 bytes_read) +{ + char *line; + s32 i; + + line = console_buffer; + for (i = 0; i < bytes_read; i++) { + /* Output a line at a time, prefixed */ + if (console_buffer[i] == '\n') { + console_buffer[i] = '\0'; + if (console->leftover[0]) { + dev_info(&oct->pci_dev->dev, "%lu: %s%s\n", + console_num, console->leftover, + line); + console->leftover[0] = '\0'; + } else { + dev_info(&oct->pci_dev->dev, "%lu: %s\n", + console_num, line); + } + line = &console_buffer[i + 1]; + } + } + + /* Save off any leftovers */ + if (line != &console_buffer[bytes_read]) { + console_buffer[bytes_read] = '\0'; + strcpy(console->leftover, line); + } +} + +static void check_console(struct work_struct *work) +{ + s32 bytes_read, tries, total_read; + struct octeon_console *console; + struct cavium_wk *wk = (struct cavium_wk *)work; + struct octeon_device *oct = (struct octeon_device *)wk->ctxptr; + size_t console_num = wk->ctxul; + u32 delay; + + console = &oct->console[console_num]; + tries = 0; + total_read = 0; + + do { + /* Take console output regardless of whether it will + * be logged + */ + bytes_read = + octeon_console_read(oct, console_num, console_buffer, + sizeof(console_buffer) - 1, 0); + if (bytes_read > 0) { + total_read += bytes_read; + if (console->waiting) { + octeon_console_handle_result(oct, console_num, + console_buffer, + bytes_read); + } + if (octeon_console_debug_enabled(console_num)) { + output_console_line(oct, console, console_num, + console_buffer, bytes_read); + } + } else if (bytes_read < 0) { + dev_err(&oct->pci_dev->dev, "Error reading console %lu, ret=%d\n", + console_num, bytes_read); + } + + tries++; + } while ((bytes_read > 0) && (tries < 16)); + + /* If nothing is read after polling the console, + * output any leftovers if any + */ + if (octeon_console_debug_enabled(console_num) && + (total_read == 0) && (console->leftover[0])) { + dev_info(&oct->pci_dev->dev, "%lu: %s\n", + console_num, console->leftover); + console->leftover[0] = '\0'; + } + + delay = OCTEON_CONSOLE_POLL_INTERVAL_MS; + + schedule_delayed_work(&wk->work, msecs_to_jiffies(delay)); +} + +int octeon_init_consoles(struct octeon_device *oct) +{ + int ret = 0; + u64 addr, size; + + ret = octeon_mem_access_ok(oct); + if (ret) { + dev_err(&oct->pci_dev->dev, "Memory access not okay'\n"); + return ret; + } + + ret = octeon_named_block_find(oct, OCTEON_PCI_CONSOLE_BLOCK_NAME, &addr, + &size); + if (ret) { + dev_err(&oct->pci_dev->dev, "Could not find console '%s'\n", + OCTEON_PCI_CONSOLE_BLOCK_NAME); + return ret; + } + + /* num_consoles > 0, is an indication that the consoles + * are accessible + */ + oct->num_consoles = octeon_read_device_mem32(oct, + addr + offsetof(struct octeon_pci_console_desc, + num_consoles)); + oct->console_desc_addr = addr; + + dev_dbg(&oct->pci_dev->dev, "Initialized consoles. 
%d available\n", + oct->num_consoles); + + return ret; +} + +int octeon_add_console(struct octeon_device *oct, u32 console_num) +{ + int ret = 0; + u32 delay; + u64 coreaddr; + struct delayed_work *work; + struct octeon_console *console; + + if (console_num >= oct->num_consoles) { + dev_err(&oct->pci_dev->dev, + "trying to read from console number %d when only 0 to %d exist\n", + console_num, oct->num_consoles); + } else { + console = &oct->console[console_num]; + + console->waiting = 0; + + coreaddr = oct->console_desc_addr + console_num * 8 + + offsetof(struct octeon_pci_console_desc, + console_addr_array); + console->addr = octeon_read_device_mem64(oct, coreaddr); + coreaddr = console->addr + offsetof(struct octeon_pci_console, + buf_size); + console->buffer_size = octeon_read_device_mem32(oct, coreaddr); + coreaddr = console->addr + offsetof(struct octeon_pci_console, + input_base_addr); + console->input_base_addr = + octeon_read_device_mem64(oct, coreaddr); + coreaddr = console->addr + offsetof(struct octeon_pci_console, + output_base_addr); + console->output_base_addr = + octeon_read_device_mem64(oct, coreaddr); + console->leftover[0] = '\0'; + + work = &oct->console_poll_work[console_num].work; + + INIT_DELAYED_WORK(work, check_console); + oct->console_poll_work[console_num].ctxptr = (void *)oct; + oct->console_poll_work[console_num].ctxul = console_num; + delay = OCTEON_CONSOLE_POLL_INTERVAL_MS; + schedule_delayed_work(work, msecs_to_jiffies(delay)); + + if (octeon_console_debug_enabled(console_num)) { + ret = octeon_console_send_cmd(oct, + "setenv pci_console_active 1", + 2000); + } + + console->active = 1; + } + + return ret; +} + +/** + * Removes all consoles + * + * @param oct octeon device + */ +void octeon_remove_consoles(struct octeon_device *oct) +{ + u32 i; + struct octeon_console *console; + + for (i = 0; i < oct->num_consoles; i++) { + console = &oct->console[i]; + + if (!console->active) + continue; + + cancel_delayed_work_sync(&oct->console_poll_work[i]. + work); + console->addr = 0; + console->buffer_size = 0; + console->input_base_addr = 0; + console->output_base_addr = 0; + } + + oct->num_consoles = 0; +} + +static inline int octeon_console_free_bytes(u32 buffer_size, + u32 wr_idx, + u32 rd_idx) +{ + if (rd_idx >= buffer_size || wr_idx >= buffer_size) + return -1; + + return ((buffer_size - 1) - (wr_idx - rd_idx)) % buffer_size; +} + +static inline int octeon_console_avail_bytes(u32 buffer_size, + u32 wr_idx, + u32 rd_idx) +{ + if (rd_idx >= buffer_size || wr_idx >= buffer_size) + return -1; + + return buffer_size - 1 - + octeon_console_free_bytes(buffer_size, wr_idx, rd_idx); +} + +int octeon_console_read(struct octeon_device *oct, u32 console_num, + char *buffer, u32 buf_size, u32 flags) +{ + int bytes_to_read; + u32 rd_idx, wr_idx; + struct octeon_console *console; + + if (console_num >= oct->num_consoles) { + dev_err(&oct->pci_dev->dev, "Attempted to read from disabled console %d\n", + console_num); + return 0; + } + + console = &oct->console[console_num]; + + /* Check to see if any data is available. + * Maybe optimize this with 64-bit read. 
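+	 * The ring indices wrap: e.g. with buffer_size 1024, rd_idx 1000
+	 * and wr_idx 10, 34 bytes are readable while
+	 * octeon_console_free_bytes() above reports 1023 - 34 = 989 free.
+	 * One slot is always kept empty so a full buffer can be told
+	 * apart from an empty one.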
+ */ + rd_idx = octeon_read_device_mem32(oct, console->addr + + offsetof(struct octeon_pci_console, output_read_index)); + wr_idx = octeon_read_device_mem32(oct, console->addr + + offsetof(struct octeon_pci_console, output_write_index)); + + bytes_to_read = octeon_console_avail_bytes(console->buffer_size, + wr_idx, rd_idx); + if (bytes_to_read <= 0) + return bytes_to_read; + + bytes_to_read = MIN(bytes_to_read, (s32)buf_size); + + /* Check to see if what we want to read is not contiguous, and limit + * ourselves to the contiguous block + */ + if (rd_idx + bytes_to_read >= console->buffer_size) + bytes_to_read = console->buffer_size - rd_idx; + + octeon_pci_read_core_mem(oct, console->output_base_addr + rd_idx, + buffer, bytes_to_read); + octeon_write_device_mem32(oct, console->addr + + offsetof(struct octeon_pci_console, + output_read_index), + (rd_idx + bytes_to_read) % + console->buffer_size); + + return bytes_to_read; +} diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c new file mode 100644 index 000000000000..2ca91657295f --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c @@ -0,0 +1,1307 @@ +/********************************************************************** +* Author: Cavium, Inc. +* +* Contact: support@cavium.com +* Please include "LiquidIO" in the subject. +* +* Copyright (c) 2003-2015 Cavium, Inc. +* +* This file is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License, Version 2, as +* published by the Free Software Foundation. +* +* This file is distributed in the hope that it will be useful, but +* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty +* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or +* NONINFRINGEMENT. See the GNU General Public License for more +* details. +* +* This file may also be available under a different license from Cavium. +* Contact Cavium, Inc. for more information +**********************************************************************/ +#include <linux/version.h> +#include <linux/types.h> +#include <linux/list.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/crc32.h> +#include <linux/kthread.h> +#include <linux/netdevice.h> +#include "octeon_config.h" +#include "liquidio_common.h" +#include "octeon_droq.h" +#include "octeon_iq.h" +#include "response_manager.h" +#include "octeon_device.h" +#include "octeon_nic.h" +#include "octeon_main.h" +#include "octeon_network.h" +#include "cn66xx_regs.h" +#include "cn66xx_device.h" +#include "cn68xx_regs.h" +#include "cn68xx_device.h" +#include "liquidio_image.h" +#include "octeon_mem_ops.h" + +/** Default configuration + * for CN66XX OCTEON Models. 
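+ * This is the table __retrieve_octeon_config_info() hands back for
+ * CN66XX (LIO_210SV) cards; the CN68XX variants follow below.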
+ */ +static struct octeon_config default_cn66xx_conf = { + .card_type = LIO_210SV, + .card_name = LIO_210SV_NAME, + + /** IQ attributes */ + .iq = { + .max_iqs = CN6XXX_CFG_IO_QUEUES, + .pending_list_size = + (CN6XXX_MAX_IQ_DESCRIPTORS * CN6XXX_CFG_IO_QUEUES), + .instr_type = OCTEON_64BYTE_INSTR, + .db_min = CN6XXX_DB_MIN, + .db_timeout = CN6XXX_DB_TIMEOUT, + } + , + + /** OQ attributes */ + .oq = { + .max_oqs = CN6XXX_CFG_IO_QUEUES, + .info_ptr = OCTEON_OQ_INFOPTR_MODE, + .refill_threshold = CN6XXX_OQ_REFIL_THRESHOLD, + .oq_intr_pkt = CN6XXX_OQ_INTR_PKT, + .oq_intr_time = CN6XXX_OQ_INTR_TIME, + .pkts_per_intr = CN6XXX_OQ_PKTSPER_INTR, + } + , + + .num_nic_ports = DEFAULT_NUM_NIC_PORTS_66XX, + .num_def_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS, + .num_def_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS, + .def_rx_buf_size = CN6XXX_OQ_BUF_SIZE, + + /* For ethernet interface 0: Port cfg Attributes */ + .nic_if_cfg[0] = { + /* Max Txqs: Half for each of the two ports :max_iq/2 */ + .max_txqs = MAX_TXQS_PER_INTF, + + /* Actual configured value. Range could be: 1...max_txqs */ + .num_txqs = DEF_TXQS_PER_INTF, + + /* Max Rxqs: Half for each of the two ports :max_oq/2 */ + .max_rxqs = MAX_RXQS_PER_INTF, + + /* Actual configured value. Range could be: 1...max_rxqs */ + .num_rxqs = DEF_RXQS_PER_INTF, + + /* Num of desc for rx rings */ + .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS, + + /* Num of desc for tx rings */ + .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS, + + /* SKB size, We need not change buf size even for Jumbo frames. + * Octeon can send jumbo frames in 4 consecutive descriptors, + */ + .rx_buf_size = CN6XXX_OQ_BUF_SIZE, + + .base_queue = BASE_QUEUE_NOT_REQUESTED, + + .gmx_port_id = 0, + }, + + .nic_if_cfg[1] = { + /* Max Txqs: Half for each of the two ports :max_iq/2 */ + .max_txqs = MAX_TXQS_PER_INTF, + + /* Actual configured value. Range could be: 1...max_txqs */ + .num_txqs = DEF_TXQS_PER_INTF, + + /* Max Rxqs: Half for each of the two ports :max_oq/2 */ + .max_rxqs = MAX_RXQS_PER_INTF, + + /* Actual configured value. Range could be: 1...max_rxqs */ + .num_rxqs = DEF_RXQS_PER_INTF, + + /* Num of desc for rx rings */ + .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS, + + /* Num of desc for tx rings */ + .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS, + + /* SKB size, We need not change buf size even for Jumbo frames. + * Octeon can send jumbo frames in 4 consecutive descriptors, + */ + .rx_buf_size = CN6XXX_OQ_BUF_SIZE, + + .base_queue = BASE_QUEUE_NOT_REQUESTED, + + .gmx_port_id = 1, + }, + + /** Miscellaneous attributes */ + .misc = { + /* Host driver link query interval */ + .oct_link_query_interval = 100, + + /* Octeon link query interval */ + .host_link_query_interval = 500, + + .enable_sli_oq_bp = 0, + + /* Control queue group */ + .ctrlq_grp = 1, + } + , +}; + +/** Default configuration + * for CN68XX OCTEON Model. 
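+ * (LIO_410NV: four network ports, hence the four nic_if_cfg entries
+ * with gmx_port_id 0-3 below.)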
+ */ + +static struct octeon_config default_cn68xx_conf = { + .card_type = LIO_410NV, + .card_name = LIO_410NV_NAME, + + /** IQ attributes */ + .iq = { + .max_iqs = CN6XXX_CFG_IO_QUEUES, + .pending_list_size = + (CN6XXX_MAX_IQ_DESCRIPTORS * CN6XXX_CFG_IO_QUEUES), + .instr_type = OCTEON_64BYTE_INSTR, + .db_min = CN6XXX_DB_MIN, + .db_timeout = CN6XXX_DB_TIMEOUT, + } + , + + /** OQ attributes */ + .oq = { + .max_oqs = CN6XXX_CFG_IO_QUEUES, + .info_ptr = OCTEON_OQ_INFOPTR_MODE, + .refill_threshold = CN6XXX_OQ_REFIL_THRESHOLD, + .oq_intr_pkt = CN6XXX_OQ_INTR_PKT, + .oq_intr_time = CN6XXX_OQ_INTR_TIME, + .pkts_per_intr = CN6XXX_OQ_PKTSPER_INTR, + } + , + + .num_nic_ports = DEFAULT_NUM_NIC_PORTS_68XX, + .num_def_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS, + .num_def_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS, + .def_rx_buf_size = CN6XXX_OQ_BUF_SIZE, + + .nic_if_cfg[0] = { + /* Max Txqs: Half for each of the two ports :max_iq/2 */ + .max_txqs = MAX_TXQS_PER_INTF, + + /* Actual configured value. Range could be: 1...max_txqs */ + .num_txqs = DEF_TXQS_PER_INTF, + + /* Max Rxqs: Half for each of the two ports :max_oq/2 */ + .max_rxqs = MAX_RXQS_PER_INTF, + + /* Actual configured value. Range could be: 1...max_rxqs */ + .num_rxqs = DEF_RXQS_PER_INTF, + + /* Num of desc for rx rings */ + .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS, + + /* Num of desc for tx rings */ + .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS, + + /* SKB size, We need not change buf size even for Jumbo frames. + * Octeon can send jumbo frames in 4 consecutive descriptors, + */ + .rx_buf_size = CN6XXX_OQ_BUF_SIZE, + + .base_queue = BASE_QUEUE_NOT_REQUESTED, + + .gmx_port_id = 0, + }, + + .nic_if_cfg[1] = { + /* Max Txqs: Half for each of the two ports :max_iq/2 */ + .max_txqs = MAX_TXQS_PER_INTF, + + /* Actual configured value. Range could be: 1...max_txqs */ + .num_txqs = DEF_TXQS_PER_INTF, + + /* Max Rxqs: Half for each of the two ports :max_oq/2 */ + .max_rxqs = MAX_RXQS_PER_INTF, + + /* Actual configured value. Range could be: 1...max_rxqs */ + .num_rxqs = DEF_RXQS_PER_INTF, + + /* Num of desc for rx rings */ + .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS, + + /* Num of desc for tx rings */ + .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS, + + /* SKB size, We need not change buf size even for Jumbo frames. + * Octeon can send jumbo frames in 4 consecutive descriptors, + */ + .rx_buf_size = CN6XXX_OQ_BUF_SIZE, + + .base_queue = BASE_QUEUE_NOT_REQUESTED, + + .gmx_port_id = 1, + }, + + .nic_if_cfg[2] = { + /* Max Txqs: Half for each of the two ports :max_iq/2 */ + .max_txqs = MAX_TXQS_PER_INTF, + + /* Actual configured value. Range could be: 1...max_txqs */ + .num_txqs = DEF_TXQS_PER_INTF, + + /* Max Rxqs: Half for each of the two ports :max_oq/2 */ + .max_rxqs = MAX_RXQS_PER_INTF, + + /* Actual configured value. Range could be: 1...max_rxqs */ + .num_rxqs = DEF_RXQS_PER_INTF, + + /* Num of desc for rx rings */ + .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS, + + /* Num of desc for tx rings */ + .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS, + + /* SKB size, We need not change buf size even for Jumbo frames. + * Octeon can send jumbo frames in 4 consecutive descriptors, + */ + .rx_buf_size = CN6XXX_OQ_BUF_SIZE, + + .base_queue = BASE_QUEUE_NOT_REQUESTED, + + .gmx_port_id = 2, + }, + + .nic_if_cfg[3] = { + /* Max Txqs: Half for each of the two ports :max_iq/2 */ + .max_txqs = MAX_TXQS_PER_INTF, + + /* Actual configured value. 
Range could be: 1...max_txqs */ + .num_txqs = DEF_TXQS_PER_INTF, + + /* Max Rxqs: Half for each of the two ports :max_oq/2 */ + .max_rxqs = MAX_RXQS_PER_INTF, + + /* Actual configured value. Range could be: 1...max_rxqs */ + .num_rxqs = DEF_RXQS_PER_INTF, + + /* Num of desc for rx rings */ + .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS, + + /* Num of desc for tx rings */ + .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS, + + /* SKB size, We need not change buf size even for Jumbo frames. + * Octeon can send jumbo frames in 4 consecutive descriptors, + */ + .rx_buf_size = CN6XXX_OQ_BUF_SIZE, + + .base_queue = BASE_QUEUE_NOT_REQUESTED, + + .gmx_port_id = 3, + }, + + /** Miscellaneous attributes */ + .misc = { + /* Host driver link query interval */ + .oct_link_query_interval = 100, + + /* Octeon link query interval */ + .host_link_query_interval = 500, + + .enable_sli_oq_bp = 0, + + /* Control queue group */ + .ctrlq_grp = 1, + } + , +}; + +/** Default configuration + * for CN68XX OCTEON Model. + */ +static struct octeon_config default_cn68xx_210nv_conf = { + .card_type = LIO_210NV, + .card_name = LIO_210NV_NAME, + + /** IQ attributes */ + + .iq = { + .max_iqs = CN6XXX_CFG_IO_QUEUES, + .pending_list_size = + (CN6XXX_MAX_IQ_DESCRIPTORS * CN6XXX_CFG_IO_QUEUES), + .instr_type = OCTEON_64BYTE_INSTR, + .db_min = CN6XXX_DB_MIN, + .db_timeout = CN6XXX_DB_TIMEOUT, + } + , + + /** OQ attributes */ + .oq = { + .max_oqs = CN6XXX_CFG_IO_QUEUES, + .info_ptr = OCTEON_OQ_INFOPTR_MODE, + .refill_threshold = CN6XXX_OQ_REFIL_THRESHOLD, + .oq_intr_pkt = CN6XXX_OQ_INTR_PKT, + .oq_intr_time = CN6XXX_OQ_INTR_TIME, + .pkts_per_intr = CN6XXX_OQ_PKTSPER_INTR, + } + , + + .num_nic_ports = DEFAULT_NUM_NIC_PORTS_68XX_210NV, + .num_def_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS, + .num_def_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS, + .def_rx_buf_size = CN6XXX_OQ_BUF_SIZE, + + .nic_if_cfg[0] = { + /* Max Txqs: Half for each of the two ports :max_iq/2 */ + .max_txqs = MAX_TXQS_PER_INTF, + + /* Actual configured value. Range could be: 1...max_txqs */ + .num_txqs = DEF_TXQS_PER_INTF, + + /* Max Rxqs: Half for each of the two ports :max_oq/2 */ + .max_rxqs = MAX_RXQS_PER_INTF, + + /* Actual configured value. Range could be: 1...max_rxqs */ + .num_rxqs = DEF_RXQS_PER_INTF, + + /* Num of desc for rx rings */ + .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS, + + /* Num of desc for tx rings */ + .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS, + + /* SKB size, We need not change buf size even for Jumbo frames. + * Octeon can send jumbo frames in 4 consecutive descriptors, + */ + .rx_buf_size = CN6XXX_OQ_BUF_SIZE, + + .base_queue = BASE_QUEUE_NOT_REQUESTED, + + .gmx_port_id = 0, + }, + + .nic_if_cfg[1] = { + /* Max Txqs: Half for each of the two ports :max_iq/2 */ + .max_txqs = MAX_TXQS_PER_INTF, + + /* Actual configured value. Range could be: 1...max_txqs */ + .num_txqs = DEF_TXQS_PER_INTF, + + /* Max Rxqs: Half for each of the two ports :max_oq/2 */ + .max_rxqs = MAX_RXQS_PER_INTF, + + /* Actual configured value. Range could be: 1...max_rxqs */ + .num_rxqs = DEF_RXQS_PER_INTF, + + /* Num of desc for rx rings */ + .num_rx_descs = CN6XXX_MAX_OQ_DESCRIPTORS, + + /* Num of desc for tx rings */ + .num_tx_descs = CN6XXX_MAX_IQ_DESCRIPTORS, + + /* SKB size, We need not change buf size even for Jumbo frames. 
+ * Octeon can send jumbo frames in 4 consecutive descriptors, + */ + .rx_buf_size = CN6XXX_OQ_BUF_SIZE, + + .base_queue = BASE_QUEUE_NOT_REQUESTED, + + .gmx_port_id = 1, + }, + + /** Miscellaneous attributes */ + .misc = { + /* Host driver link query interval */ + .oct_link_query_interval = 100, + + /* Octeon link query interval */ + .host_link_query_interval = 500, + + .enable_sli_oq_bp = 0, + + /* Control queue group */ + .ctrlq_grp = 1, + } + , +}; + +enum { + OCTEON_CONFIG_TYPE_DEFAULT = 0, + NUM_OCTEON_CONFS, +}; + +static struct octeon_config_ptr { + u32 conf_type; +} oct_conf_info[MAX_OCTEON_DEVICES] = { + { + OCTEON_CONFIG_TYPE_DEFAULT, + }, { + OCTEON_CONFIG_TYPE_DEFAULT, + }, { + OCTEON_CONFIG_TYPE_DEFAULT, + }, { + OCTEON_CONFIG_TYPE_DEFAULT, + }, +}; + +static char oct_dev_state_str[OCT_DEV_STATES + 1][32] = { + "BEGIN", "PCI-MAP-DONE", "DISPATCH-INIT-DONE", + "IQ-INIT-DONE", "SCBUFF-POOL-INIT-DONE", "RESPLIST-INIT-DONE", + "DROQ-INIT-DONE", "IO-QUEUES-INIT-DONE", "CONSOLE-INIT-DONE", + "HOST-READY", "CORE-READY", "RUNNING", "IN-RESET", + "INVALID" +}; + +static char oct_dev_app_str[CVM_DRV_APP_COUNT + 1][32] = { + "BASE", "NIC", "UNKNOWN"}; + +static struct octeon_device *octeon_device[MAX_OCTEON_DEVICES]; +static u32 octeon_device_count; + +static struct octeon_core_setup core_setup[MAX_OCTEON_DEVICES]; + +void oct_set_config_info(int oct_id, int conf_type) +{ + if (conf_type < 0 || conf_type > (NUM_OCTEON_CONFS - 1)) + conf_type = OCTEON_CONFIG_TYPE_DEFAULT; + oct_conf_info[oct_id].conf_type = conf_type; +} + +void octeon_init_device_list(int conf_type) +{ + int i; + + memset(octeon_device, 0, (sizeof(void *) * MAX_OCTEON_DEVICES)); + for (i = 0; i < MAX_OCTEON_DEVICES; i++) + oct_set_config_info(i, conf_type); +} + +static void *__retrieve_octeon_config_info(struct octeon_device *oct, + u16 card_type) +{ + u32 oct_id = oct->octeon_id; + void *ret = NULL; + + switch (oct_conf_info[oct_id].conf_type) { + case OCTEON_CONFIG_TYPE_DEFAULT: + if (oct->chip_id == OCTEON_CN66XX) { + ret = (void *)&default_cn66xx_conf; + } else if ((oct->chip_id == OCTEON_CN68XX) && + (card_type == LIO_210NV)) { + ret = (void *)&default_cn68xx_210nv_conf; + } else if ((oct->chip_id == OCTEON_CN68XX) && + (card_type == LIO_410NV)) { + ret = (void *)&default_cn68xx_conf; + } + break; + default: + break; + } + return ret; +} + +static int __verify_octeon_config_info(struct octeon_device *oct, void *conf) +{ + switch (oct->chip_id) { + case OCTEON_CN66XX: + case OCTEON_CN68XX: + return lio_validate_cn6xxx_config_info(oct, conf); + + default: + break; + } + + return 1; +} + +void *oct_get_config_info(struct octeon_device *oct, u16 card_type) +{ + void *conf = NULL; + + conf = __retrieve_octeon_config_info(oct, card_type); + if (!conf) + return NULL; + + if (__verify_octeon_config_info(oct, conf)) { + dev_err(&oct->pci_dev->dev, "Configuration verification failed\n"); + return NULL; + } + + return conf; +} + +char *lio_get_state_string(atomic_t *state_ptr) +{ + s32 istate = (s32)atomic_read(state_ptr); + + if (istate > OCT_DEV_STATES || istate < 0) + return oct_dev_state_str[OCT_DEV_STATE_INVALID]; + return oct_dev_state_str[istate]; +} + +static char *get_oct_app_string(u32 app_mode) +{ + if (app_mode <= CVM_DRV_APP_END) + return oct_dev_app_str[app_mode - CVM_DRV_APP_START]; + return oct_dev_app_str[CVM_DRV_INVALID_APP - CVM_DRV_APP_START]; +} + +int octeon_download_firmware(struct octeon_device *oct, const u8 *data, + size_t size) +{ + int ret = 0; + u8 *p; + u8 *buffer; + u32 crc32_result; + u64 
load_addr; + u32 image_len; + struct octeon_firmware_file_header *h; + u32 i; + + if (size < sizeof(struct octeon_firmware_file_header)) { + dev_err(&oct->pci_dev->dev, "Firmware file too small (%d < %d).\n", + (u32)size, + (u32)sizeof(struct octeon_firmware_file_header)); + return -EINVAL; + } + + h = (struct octeon_firmware_file_header *)data; + + if (h->magic != be32_to_cpu(LIO_NIC_MAGIC)) { + dev_err(&oct->pci_dev->dev, "Unrecognized firmware file.\n"); + return -EINVAL; + } + + crc32_result = + crc32(~0, data, + sizeof(struct octeon_firmware_file_header) - + sizeof(u32)) ^ ~0U; + if (crc32_result != be32_to_cpu(h->crc32)) { + dev_err(&oct->pci_dev->dev, "Firmware CRC mismatch (0x%08x != 0x%08x).\n", + crc32_result, be32_to_cpu(h->crc32)); + return -EINVAL; + } + + if (memcmp(LIQUIDIO_VERSION, h->version, strlen(LIQUIDIO_VERSION))) { + dev_err(&oct->pci_dev->dev, "Unmatched firmware version. Expected %s, got %s.\n", + LIQUIDIO_VERSION, h->version); + return -EINVAL; + } + + if (be32_to_cpu(h->num_images) > LIO_MAX_IMAGES) { + dev_err(&oct->pci_dev->dev, "Too many images in firmware file (%d).\n", + be32_to_cpu(h->num_images)); + return -EINVAL; + } + + dev_info(&oct->pci_dev->dev, "Firmware version: %s\n", h->version); + snprintf(oct->fw_info.liquidio_firmware_version, 32, "LIQUIDIO: %s", + h->version); + + buffer = kmalloc(size, GFP_KERNEL); + if (!buffer) + return -ENOMEM; + + memcpy(buffer, data, size); + + p = buffer + sizeof(struct octeon_firmware_file_header); + + /* load all images */ + for (i = 0; i < be32_to_cpu(h->num_images); i++) { + load_addr = be64_to_cpu(h->desc[i].addr); + image_len = be32_to_cpu(h->desc[i].len); + + /* validate the image */ + crc32_result = crc32(~0, p, image_len) ^ ~0U; + if (crc32_result != be32_to_cpu(h->desc[i].crc32)) { + dev_err(&oct->pci_dev->dev, + "Firmware CRC mismatch in image %d (0x%08x != 0x%08x).\n", + i, crc32_result, + be32_to_cpu(h->desc[i].crc32)); + ret = -EINVAL; + goto done_downloading; + } + + /* download the image */ + octeon_pci_write_core_mem(oct, load_addr, p, image_len); + + p += image_len; + dev_dbg(&oct->pci_dev->dev, + "Downloaded image %d (%d bytes) to address 0x%016llx\n", + i, image_len, load_addr); + } + + /* Invoke the bootcmd */ + ret = octeon_console_send_cmd(oct, h->bootcmd, 50); + +done_downloading: + kfree(buffer); + + return ret; +} + +void octeon_free_device_mem(struct octeon_device *oct) +{ + u32 i; + + for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) { + /* could check mask as well */ + if (oct->droq[i]) + vfree(oct->droq[i]); + } + + for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) { + /* could check mask as well */ + if (oct->instr_queue[i]) + vfree(oct->instr_queue[i]); + } + + i = oct->octeon_id; + vfree(oct); + + octeon_device[i] = NULL; + octeon_device_count--; +} + +static struct octeon_device *octeon_allocate_device_mem(u32 pci_id, + u32 priv_size) +{ + struct octeon_device *oct; + u8 *buf = NULL; + u32 octdevsize = 0, configsize = 0, size; + + switch (pci_id) { + case OCTEON_CN68XX: + case OCTEON_CN66XX: + configsize = sizeof(struct octeon_cn6xxx); + break; + + default: + pr_err("%s: Unknown PCI Device: 0x%x\n", + __func__, + pci_id); + return NULL; + } + + if (configsize & 0x7) + configsize += (8 - (configsize & 0x7)); + + octdevsize = sizeof(struct octeon_device); + if (octdevsize & 0x7) + octdevsize += (8 - (octdevsize & 0x7)); + + if (priv_size & 0x7) + priv_size += (8 - (priv_size & 0x7)); + + size = octdevsize + priv_size + configsize + + (sizeof(struct octeon_dispatch) * DISPATCH_LIST_SIZE); + + 
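+	/* A single allocation backs all four regions, each size rounded
+	 * up to an 8-byte boundary above:
+	 *
+	 *   [octeon_device | priv | chip config | dispatch list]
+	 */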
buf = vmalloc(size); + if (!buf) + return NULL; + + memset(buf, 0, size); + + oct = (struct octeon_device *)buf; + oct->priv = (void *)(buf + octdevsize); + oct->chip = (void *)(buf + octdevsize + priv_size); + oct->dispatch.dlist = (struct octeon_dispatch *) + (buf + octdevsize + priv_size + configsize); + + return oct; +} + +struct octeon_device *octeon_allocate_device(u32 pci_id, + u32 priv_size) +{ + u32 oct_idx = 0; + struct octeon_device *oct = NULL; + + for (oct_idx = 0; oct_idx < MAX_OCTEON_DEVICES; oct_idx++) + if (!octeon_device[oct_idx]) + break; + + if (oct_idx == MAX_OCTEON_DEVICES) + return NULL; + + oct = octeon_allocate_device_mem(pci_id, priv_size); + if (!oct) + return NULL; + + spin_lock_init(&oct->pci_win_lock); + spin_lock_init(&oct->mem_access_lock); + + octeon_device_count++; + octeon_device[oct_idx] = oct; + + oct->octeon_id = oct_idx; + snprintf((oct->device_name), sizeof(oct->device_name), + "LiquidIO%d", (oct->octeon_id)); + + return oct; +} + +int octeon_setup_instr_queues(struct octeon_device *oct) +{ + u32 i, num_iqs = 0; + u32 num_descs = 0; + + /* this causes queue 0 to be default queue */ + if (OCTEON_CN6XXX(oct)) { + num_iqs = 1; + num_descs = + CFG_GET_NUM_DEF_TX_DESCS(CHIP_FIELD(oct, cn6xxx, conf)); + } + + oct->num_iqs = 0; + + for (i = 0; i < num_iqs; i++) { + oct->instr_queue[i] = + vmalloc(sizeof(struct octeon_instr_queue)); + if (!oct->instr_queue[i]) + return 1; + + memset(oct->instr_queue[i], 0, + sizeof(struct octeon_instr_queue)); + + oct->instr_queue[i]->app_ctx = (void *)(size_t)i; + if (octeon_init_instr_queue(oct, i, num_descs)) + return 1; + + oct->num_iqs++; + } + + return 0; +} + +int octeon_setup_output_queues(struct octeon_device *oct) +{ + u32 i, num_oqs = 0; + u32 num_descs = 0; + u32 desc_size = 0; + + /* this causes queue 0 to be default queue */ + if (OCTEON_CN6XXX(oct)) { + /* CFG_GET_OQ_MAX_BASE_Q(CHIP_FIELD(oct, cn6xxx, conf)); */ + num_oqs = 1; + num_descs = + CFG_GET_NUM_DEF_RX_DESCS(CHIP_FIELD(oct, cn6xxx, conf)); + desc_size = + CFG_GET_DEF_RX_BUF_SIZE(CHIP_FIELD(oct, cn6xxx, conf)); + } + + oct->num_oqs = 0; + + for (i = 0; i < num_oqs; i++) { + oct->droq[i] = vmalloc(sizeof(*oct->droq[i])); + if (!oct->droq[i]) + return 1; + + memset(oct->droq[i], 0, sizeof(struct octeon_droq)); + + if (octeon_init_droq(oct, i, num_descs, desc_size, NULL)) + return 1; + + oct->num_oqs++; + } + + return 0; +} + +void octeon_set_io_queues_off(struct octeon_device *oct) +{ + /* Disable the i/p and o/p queues for this Octeon. */ + + octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0); + octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0); +} + +void octeon_set_droq_pkt_op(struct octeon_device *oct, + u32 q_no, + u32 enable) +{ + u32 reg_val = 0; + + /* Disable the i/p and o/p queues for this Octeon. 
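+	 * More precisely, only bit q_no of SLI_PKT_OUT_ENB changes here:
+	 * the register is read, that bit is set or cleared according to
+	 * 'enable', and the value is written back.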
*/ + reg_val = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB); + + if (enable) + reg_val = reg_val | (1 << q_no); + else + reg_val = reg_val & (~(1 << q_no)); + + octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, reg_val); +} + +int octeon_init_dispatch_list(struct octeon_device *oct) +{ + u32 i; + + oct->dispatch.count = 0; + + for (i = 0; i < DISPATCH_LIST_SIZE; i++) { + oct->dispatch.dlist[i].opcode = 0; + INIT_LIST_HEAD(&oct->dispatch.dlist[i].list); + } + + for (i = 0; i <= REQTYPE_LAST; i++) + octeon_register_reqtype_free_fn(oct, i, NULL); + + spin_lock_init(&oct->dispatch.lock); + + return 0; +} + +void octeon_delete_dispatch_list(struct octeon_device *oct) +{ + u32 i; + struct list_head freelist, *temp, *tmp2; + + INIT_LIST_HEAD(&freelist); + + spin_lock_bh(&oct->dispatch.lock); + + for (i = 0; i < DISPATCH_LIST_SIZE; i++) { + struct list_head *dispatch; + + dispatch = &oct->dispatch.dlist[i].list; + while (dispatch->next != dispatch) { + temp = dispatch->next; + list_del(temp); + list_add_tail(temp, &freelist); + } + + oct->dispatch.dlist[i].opcode = 0; + } + + oct->dispatch.count = 0; + + spin_unlock_bh(&oct->dispatch.lock); + + list_for_each_safe(temp, tmp2, &freelist) { + list_del(temp); + vfree(temp); + } +} + +octeon_dispatch_fn_t +octeon_get_dispatch(struct octeon_device *octeon_dev, u16 opcode, + u16 subcode) +{ + u32 idx; + struct list_head *dispatch; + octeon_dispatch_fn_t fn = NULL; + u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode); + + idx = combined_opcode & OCTEON_OPCODE_MASK; + + spin_lock_bh(&octeon_dev->dispatch.lock); + + if (octeon_dev->dispatch.count == 0) { + spin_unlock_bh(&octeon_dev->dispatch.lock); + return NULL; + } + + if (!(octeon_dev->dispatch.dlist[idx].opcode)) { + spin_unlock_bh(&octeon_dev->dispatch.lock); + return NULL; + } + + if (octeon_dev->dispatch.dlist[idx].opcode == combined_opcode) { + fn = octeon_dev->dispatch.dlist[idx].dispatch_fn; + } else { + list_for_each(dispatch, + &octeon_dev->dispatch.dlist[idx].list) { + if (((struct octeon_dispatch *)dispatch)->opcode == + combined_opcode) { + fn = ((struct octeon_dispatch *) + dispatch)->dispatch_fn; + break; + } + } + } + + spin_unlock_bh(&octeon_dev->dispatch.lock); + return fn; +} + +/* octeon_register_dispatch_fn + * Parameters: + * octeon_id - id of the octeon device. + * opcode - opcode for which driver should call the registered function + * subcode - subcode for which driver should call the registered function + * fn - The function to call when a packet with "opcode" arrives in + * octeon output queues. + * fn_arg - The argument to be passed when calling function "fn". + * Description: + * Registers a function and its argument to be called when a packet + * arrives in Octeon output queues with "opcode". + * Returns: + * Success: 0 + * Failure: 1 + * Locks: + * No locks are held. 
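+ * Example (opcode/handler names are illustrative only, not
+ * definitions from this driver):
+ *
+ *   if (octeon_register_dispatch_fn(oct, OPCODE_NIC, OPCODE_NIC_INFO,
+ *                                   nic_info_dispatch, (void *)oct))
+ *       dev_err(&oct->pci_dev->dev, "dispatch fn already registered\n");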
+ */ +int +octeon_register_dispatch_fn(struct octeon_device *oct, + u16 opcode, + u16 subcode, + octeon_dispatch_fn_t fn, void *fn_arg) +{ + u32 idx; + octeon_dispatch_fn_t pfn; + u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode); + + idx = combined_opcode & OCTEON_OPCODE_MASK; + + spin_lock_bh(&oct->dispatch.lock); + /* Add dispatch function to first level of lookup table */ + if (oct->dispatch.dlist[idx].opcode == 0) { + oct->dispatch.dlist[idx].opcode = combined_opcode; + oct->dispatch.dlist[idx].dispatch_fn = fn; + oct->dispatch.dlist[idx].arg = fn_arg; + oct->dispatch.count++; + spin_unlock_bh(&oct->dispatch.lock); + return 0; + } + + spin_unlock_bh(&oct->dispatch.lock); + + /* Check if there was a function already registered for this + * opcode/subcode. + */ + pfn = octeon_get_dispatch(oct, opcode, subcode); + if (!pfn) { + struct octeon_dispatch *dispatch; + + dev_dbg(&oct->pci_dev->dev, + "Adding opcode to dispatch list linked list\n"); + dispatch = (struct octeon_dispatch *) + vmalloc(sizeof(struct octeon_dispatch)); + if (!dispatch) { + dev_err(&oct->pci_dev->dev, + "No memory to add dispatch function\n"); + return 1; + } + dispatch->opcode = combined_opcode; + dispatch->dispatch_fn = fn; + dispatch->arg = fn_arg; + + /* Add dispatch function to linked list of fn ptrs + * at the hashed index. + */ + spin_lock_bh(&oct->dispatch.lock); + list_add(&dispatch->list, &oct->dispatch.dlist[idx].list); + oct->dispatch.count++; + spin_unlock_bh(&oct->dispatch.lock); + + } else { + dev_err(&oct->pci_dev->dev, + "Found previously registered dispatch fn for opcode/subcode: %x/%x\n", + opcode, subcode); + return 1; + } + + return 0; +} + +/* octeon_unregister_dispatch_fn + * Parameters: + * oct - octeon device + * opcode - driver should unregister the function for this opcode + * subcode - driver should unregister the function for this subcode + * Description: + * Unregister the function set for this opcode+subcode. + * Returns: + * Success: 0 + * Failure: 1 + * Locks: + * No locks are held. + */ +int +octeon_unregister_dispatch_fn(struct octeon_device *oct, u16 opcode, + u16 subcode) +{ + int retval = 0; + u32 idx; + struct list_head *dispatch, *dfree = NULL, *tmp2; + u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode); + + idx = combined_opcode & OCTEON_OPCODE_MASK; + + spin_lock_bh(&oct->dispatch.lock); + + if (oct->dispatch.count == 0) { + spin_unlock_bh(&oct->dispatch.lock); + dev_err(&oct->pci_dev->dev, + "No dispatch functions registered for this device\n"); + return 1; + } + + if (oct->dispatch.dlist[idx].opcode == combined_opcode) { + dispatch = &oct->dispatch.dlist[idx].list; + if (dispatch->next != dispatch) { + dispatch = dispatch->next; + oct->dispatch.dlist[idx].opcode = + ((struct octeon_dispatch *)dispatch)->opcode; + oct->dispatch.dlist[idx].dispatch_fn = + ((struct octeon_dispatch *) + dispatch)->dispatch_fn; + oct->dispatch.dlist[idx].arg = + ((struct octeon_dispatch *)dispatch)->arg; + list_del(dispatch); + dfree = dispatch; + } else { + oct->dispatch.dlist[idx].opcode = 0; + oct->dispatch.dlist[idx].dispatch_fn = NULL; + oct->dispatch.dlist[idx].arg = NULL; + } + } else { + retval = 1; + list_for_each_safe(dispatch, tmp2, + &(oct->dispatch.dlist[idx]. 
+ list)) { + if (((struct octeon_dispatch *)dispatch)->opcode == + combined_opcode) { + list_del(dispatch); + dfree = dispatch; + retval = 0; + } + } + } + + if (!retval) + oct->dispatch.count--; + + spin_unlock_bh(&oct->dispatch.lock); + + if (dfree) + vfree(dfree); + + return retval; +} + +int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf) +{ + u32 i; + char app_name[16]; + struct octeon_device *oct = (struct octeon_device *)buf; + struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt; + struct octeon_core_setup *cs = NULL; + u32 num_nic_ports = 0; + + if (OCTEON_CN6XXX(oct)) + num_nic_ports = + CFG_GET_NUM_NIC_PORTS(CHIP_FIELD(oct, cn6xxx, conf)); + + if (atomic_read(&oct->status) >= OCT_DEV_RUNNING) { + dev_err(&oct->pci_dev->dev, "Received CORE OK when device state is 0x%x\n", + atomic_read(&oct->status)); + goto core_drv_init_err; + } + + strncpy(app_name, + get_oct_app_string( + (u32)recv_pkt->rh.r_core_drv_init.app_mode), + sizeof(app_name) - 1); + oct->app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode; + if (recv_pkt->rh.r_core_drv_init.app_mode == CVM_DRV_NIC_APP) + oct->fw_info.max_nic_ports = + (u32)recv_pkt->rh.r_core_drv_init.max_nic_ports; + oct->fw_info.num_gmx_ports = + (u32)recv_pkt->rh.r_core_drv_init.num_gmx_ports; + + if (oct->fw_info.max_nic_ports < num_nic_ports) { + dev_err(&oct->pci_dev->dev, + "Config has more ports than firmware allows (%d > %d).\n", + num_nic_ports, oct->fw_info.max_nic_ports); + goto core_drv_init_err; + } + oct->fw_info.app_cap_flags = recv_pkt->rh.r_core_drv_init.app_cap_flags; + oct->fw_info.app_mode = (u32)recv_pkt->rh.r_core_drv_init.app_mode; + + atomic_set(&oct->status, OCT_DEV_CORE_OK); + + cs = &core_setup[oct->octeon_id]; + + if (recv_pkt->buffer_size[0] != sizeof(*cs)) { + dev_dbg(&oct->pci_dev->dev, "Core setup bytes expected %u found %d\n", + (u32)sizeof(*cs), + recv_pkt->buffer_size[0]); + } + + memcpy(cs, get_rbd(recv_pkt->buffer_ptr[0]), sizeof(*cs)); + strncpy(oct->boardinfo.name, cs->boardname, OCT_BOARD_NAME); + strncpy(oct->boardinfo.serial_number, cs->board_serial_number, + OCT_SERIAL_LEN); + + octeon_swap_8B_data((u64 *)cs, (sizeof(*cs) >> 3)); + + oct->boardinfo.major = cs->board_rev_major; + oct->boardinfo.minor = cs->board_rev_minor; + + dev_info(&oct->pci_dev->dev, + "Running %s (%llu Hz)\n", + app_name, CVM_CAST64(cs->corefreq)); + +core_drv_init_err: + for (i = 0; i < recv_pkt->buffer_count; i++) + recv_buffer_free(recv_pkt->buffer_ptr[i]); + octeon_free_recv_info(recv_info); + return 0; +} + +int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no) + +{ + if (oct && (q_no < MAX_OCTEON_INSTR_QUEUES) && + (oct->io_qmask.iq & (1UL << q_no))) + return oct->instr_queue[q_no]->max_count; + + return -1; +} + +int octeon_get_rx_qsize(struct octeon_device *oct, u32 q_no) +{ + if (oct && (q_no < MAX_OCTEON_OUTPUT_QUEUES) && + (oct->io_qmask.oq & (1UL << q_no))) + return oct->droq[q_no]->max_count; + return -1; +} + +/* Retruns the host firmware handshake OCTEON specific configuration */ +struct octeon_config *octeon_get_conf(struct octeon_device *oct) +{ + struct octeon_config *default_oct_conf = NULL; + + /* check the OCTEON Device model & return the corresponding octeon + * configuration + */ + + if (OCTEON_CN6XXX(oct)) { + default_oct_conf = + (struct octeon_config *)(CHIP_FIELD(oct, cn6xxx, conf)); + } + + return default_oct_conf; +} + +/* scratch register address is same in all the OCT-II and CN70XX models */ +#define CNXX_SLI_SCRATCH1 0x3C0 + +/** Get the octeon device pointer. 
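+ * The id is an index into the driver's static octeon_device[] table,
+ * assigned when octeon_allocate_device() claimed the slot.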
+ * @param octeon_id - The id for which the octeon device pointer is required. + * @return Success: Octeon device pointer. + * @return Failure: NULL. + */ +struct octeon_device *lio_get_device(u32 octeon_id) +{ + if (octeon_id >= MAX_OCTEON_DEVICES) + return NULL; + else + return octeon_device[octeon_id]; +} + +u64 lio_pci_readq(struct octeon_device *oct, u64 addr) +{ + u64 val64; + unsigned long flags; + u32 val32, addrhi; + + spin_lock_irqsave(&oct->pci_win_lock, flags); + + /* The windowed read happens when the LSB of the addr is written. + * So write MSB first + */ + addrhi = (addr >> 32); + if ((oct->chip_id == OCTEON_CN66XX) || (oct->chip_id == OCTEON_CN68XX)) + addrhi |= 0x00060000; + writel(addrhi, oct->reg_list.pci_win_rd_addr_hi); + + /* Read back to preserve ordering of writes */ + val32 = readl(oct->reg_list.pci_win_rd_addr_hi); + + writel(addr & 0xffffffff, oct->reg_list.pci_win_rd_addr_lo); + val32 = readl(oct->reg_list.pci_win_rd_addr_lo); + + val64 = readq(oct->reg_list.pci_win_rd_data); + + spin_unlock_irqrestore(&oct->pci_win_lock, flags); + + return val64; +} + +void lio_pci_writeq(struct octeon_device *oct, + u64 val, + u64 addr) +{ + u32 val32; + unsigned long flags; + + spin_lock_irqsave(&oct->pci_win_lock, flags); + + writeq(addr, oct->reg_list.pci_win_wr_addr); + + /* The write happens when the LSB is written. So write MSB first. */ + writel(val >> 32, oct->reg_list.pci_win_wr_data_hi); + /* Read the MSB to ensure ordering of writes. */ + val32 = readl(oct->reg_list.pci_win_wr_data_hi); + + writel(val & 0xffffffff, oct->reg_list.pci_win_wr_data_lo); + + spin_unlock_irqrestore(&oct->pci_win_lock, flags); +} + +int octeon_mem_access_ok(struct octeon_device *oct) +{ + u64 access_okay = 0; + + /* Check to make sure a DDR interface is enabled */ + u64 lmc0_reset_ctl = lio_pci_readq(oct, CN6XXX_LMC0_RESET_CTL); + + access_okay = (lmc0_reset_ctl & CN6XXX_LMC0_RESET_CTL_DDR3RST_MASK); + + return access_okay ? 0 : 1; +} + +int octeon_wait_for_ddr_init(struct octeon_device *oct, u32 *timeout) +{ + int ret = 1; + u32 ms; + + if (!timeout) + return ret; + + while (*timeout == 0) + schedule_timeout_uninterruptible(HZ / 10); + + for (ms = 0; (ret != 0) && ((*timeout == 0) || (ms <= *timeout)); + ms += HZ / 10) { + ret = octeon_mem_access_ok(oct); + + /* wait 100 ms */ + if (ret) + schedule_timeout_uninterruptible(HZ / 10); + } + + return ret; +} + +/** Get the octeon id assigned to the octeon device passed as argument. + * This function is exported to other modules. + * @param dev - octeon device pointer passed as a void *. + * @return octeon device id + */ +int lio_get_device_id(void *dev) +{ + struct octeon_device *octeon_dev = (struct octeon_device *)dev; + u32 i; + + for (i = 0; i < MAX_OCTEON_DEVICES; i++) + if (octeon_device[i] == octeon_dev) + return octeon_dev->octeon_id; + return -1; +} diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h new file mode 100644 index 000000000000..36e1f85df8c4 --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h @@ -0,0 +1,649 @@ +/********************************************************************** +* Author: Cavium, Inc. +* +* Contact: support@cavium.com +* Please include "LiquidIO" in the subject. +* +* Copyright (c) 2003-2015 Cavium, Inc. +* +* This file is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License, Version 2, as +* published by the Free Software Foundation. 
+* +* This file is distributed in the hope that it will be useful, but +* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty +* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or +* NONINFRINGEMENT. See the GNU General Public License for more +* details. +* +* This file may also be available under a different license from Cavium. +* Contact Cavium, Inc. for more information +**********************************************************************/ + +/*! \file octeon_device.h + * \brief Host Driver: This file defines the octeon device structure. + */ + +#ifndef _OCTEON_DEVICE_H_ +#define _OCTEON_DEVICE_H_ + +/** PCI VendorId Device Id */ +#define OCTEON_CN68XX_PCIID 0x91177d +#define OCTEON_CN66XX_PCIID 0x92177d + +/** Driver identifies chips by these Ids, created by clubbing together + * DeviceId+RevisionId; Where Revision Id is not used to distinguish + * between chips, a value of 0 is used for revision id. + */ +#define OCTEON_CN68XX 0x0091 +#define OCTEON_CN66XX 0x0092 + +/** Endian-swap modes supported by Octeon. */ +enum octeon_pci_swap_mode { + OCTEON_PCI_PASSTHROUGH = 0, + OCTEON_PCI_64BIT_SWAP = 1, + OCTEON_PCI_32BIT_BYTE_SWAP = 2, + OCTEON_PCI_32BIT_LW_SWAP = 3 +}; + +/*--------------- PCI BAR1 index registers -------------*/ + +/* BAR1 Mask */ +#define PCI_BAR1_ENABLE_CA 1 +#define PCI_BAR1_ENDIAN_MODE OCTEON_PCI_64BIT_SWAP +#define PCI_BAR1_ENTRY_VALID 1 +#define PCI_BAR1_MASK ((PCI_BAR1_ENABLE_CA << 3) \ + | (PCI_BAR1_ENDIAN_MODE << 1) \ + | PCI_BAR1_ENTRY_VALID) + +/** Octeon Device state. + * Each octeon device goes through each of these states + * as it is initialized. + */ +#define OCT_DEV_BEGIN_STATE 0x0 +#define OCT_DEV_PCI_MAP_DONE 0x1 +#define OCT_DEV_DISPATCH_INIT_DONE 0x2 +#define OCT_DEV_INSTR_QUEUE_INIT_DONE 0x3 +#define OCT_DEV_SC_BUFF_POOL_INIT_DONE 0x4 +#define OCT_DEV_RESP_LIST_INIT_DONE 0x5 +#define OCT_DEV_DROQ_INIT_DONE 0x6 +#define OCT_DEV_IO_QUEUES_DONE 0x7 +#define OCT_DEV_CONSOLE_INIT_DONE 0x8 +#define OCT_DEV_HOST_OK 0x9 +#define OCT_DEV_CORE_OK 0xa +#define OCT_DEV_RUNNING 0xb +#define OCT_DEV_IN_RESET 0xc +#define OCT_DEV_STATE_INVALID 0xd + +#define OCT_DEV_STATES OCT_DEV_STATE_INVALID + +/** Octeon Device interrupts + * These interrupt bits are set in int_status filed of + * octeon_device structure + */ +#define OCT_DEV_INTR_DMA0_FORCE 0x01 +#define OCT_DEV_INTR_DMA1_FORCE 0x02 +#define OCT_DEV_INTR_PKT_DATA 0x04 + +#define LIO_RESET_SECS (3) + +/*---------------------------DISPATCH LIST-------------------------------*/ + +/** The dispatch list entry. + * The driver keeps a record of functions registered for each + * response header opcode in this structure. Since the opcode is + * hashed to index into the driver's list, more than one opcode + * can hash to the same entry, in which case the list field points + * to a linked list with the other entries. + */ +struct octeon_dispatch { + /** List head for this entry */ + struct list_head list; + + /** The opcode for which the dispatch function & arg should be used */ + u16 opcode; + + /** The function to be called for a packet received by the driver */ + octeon_dispatch_fn_t dispatch_fn; + + /* The application specified argument to be passed to the above + * function along with the received packet + */ + void *arg; +}; + +/** The dispatch list structure. 
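+ * Lookup is two-level: OPCODE_SUBCODE(opcode, subcode) masked with
+ * OCTEON_OPCODE_MASK selects a slot in dlist[]; the first function
+ * registered owns the slot, and later opcodes that hash to the same
+ * slot are chained on dlist[idx].list.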
*/ +struct octeon_dispatch_list { + /** access to dispatch list must be atomic */ + spinlock_t lock; + + /** Count of dispatch functions currently registered */ + u32 count; + + /** The list of dispatch functions */ + struct octeon_dispatch *dlist; +}; + +/*----------------------- THE OCTEON DEVICE ---------------------------*/ + +#define OCT_MEM_REGIONS 3 +/** PCI address space mapping information. + * Each of the 3 address spaces given by BAR0, BAR2 and BAR4 of + * Octeon gets mapped to different physical address spaces in + * the kernel. + */ +struct octeon_mmio { + /** PCI address to which the BAR is mapped. */ + u64 start; + + /** Length of this PCI address space. */ + u32 len; + + /** Length that has been mapped to phys. address space. */ + u32 mapped_len; + + /** The physical address to which the PCI address space is mapped. */ + u8 __iomem *hw_addr; + + /** Flag indicating the mapping was successful. */ + u32 done; +}; + +#define MAX_OCTEON_MAPS 32 + +struct octeon_io_enable { + u32 iq; + u32 oq; + u32 iq64B; +}; + +struct octeon_reg_list { + u32 __iomem *pci_win_wr_addr_hi; + u32 __iomem *pci_win_wr_addr_lo; + u64 __iomem *pci_win_wr_addr; + + u32 __iomem *pci_win_rd_addr_hi; + u32 __iomem *pci_win_rd_addr_lo; + u64 __iomem *pci_win_rd_addr; + + u32 __iomem *pci_win_wr_data_hi; + u32 __iomem *pci_win_wr_data_lo; + u64 __iomem *pci_win_wr_data; + + u32 __iomem *pci_win_rd_data_hi; + u32 __iomem *pci_win_rd_data_lo; + u64 __iomem *pci_win_rd_data; +}; + +#define OCTEON_CONSOLE_MAX_READ_BYTES 512 +struct octeon_console { + u32 active; + u32 waiting; + u64 addr; + u32 buffer_size; + u64 input_base_addr; + u64 output_base_addr; + char leftover[OCTEON_CONSOLE_MAX_READ_BYTES]; +}; + +struct octeon_board_info { + char name[OCT_BOARD_NAME]; + char serial_number[OCT_SERIAL_LEN]; + u64 major; + u64 minor; +}; + +struct octeon_fn_list { + void (*setup_iq_regs)(struct octeon_device *, u32); + void (*setup_oq_regs)(struct octeon_device *, u32); + + irqreturn_t (*process_interrupt_regs)(void *); + int (*soft_reset)(struct octeon_device *); + int (*setup_device_regs)(struct octeon_device *); + void (*reinit_regs)(struct octeon_device *); + void (*bar1_idx_setup)(struct octeon_device *, u64, u32, int); + void (*bar1_idx_write)(struct octeon_device *, u32, u32); + u32 (*bar1_idx_read)(struct octeon_device *, u32); + u32 (*update_iq_read_idx)(struct octeon_device *, + struct octeon_instr_queue *); + + void (*enable_oq_pkt_time_intr)(struct octeon_device *, u32); + void (*disable_oq_pkt_time_intr)(struct octeon_device *, u32); + + void (*enable_interrupt)(void *); + void (*disable_interrupt)(void *); + + void (*enable_io_queues)(struct octeon_device *); + void (*disable_io_queues)(struct octeon_device *); +}; + +/* Must be multiple of 8, changing breaks ABI */ +#define CVMX_BOOTMEM_NAME_LEN 128 + +/* Structure for named memory blocks + * Number of descriptors + * available can be changed without affecting compatiblity, + * but name length changes require a bump in the bootmem + * descriptor version + * Note: This structure must be naturally 64 bit aligned, as a single + * memory image will be used by both 32 and 64 bit programs. 
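+ *
+ * The host never maps this structure from Octeon memory directly;
+ * members are fetched one at a time over PCI by the
+ * CVMX_BOOTMEM_NAMED_GET_FIELD()/CVMX_BOOTMEM_NAMED_GET_NAME()
+ * helpers, so the offsets here must match the target image exactly.
+ * A sketch of how the field accessor is assumed to expand (the real
+ * macro is defined elsewhere):
+ *
+ *   #define CVMX_BOOTMEM_NAMED_GET_FIELD(oct, addr, field)          \
+ *     __cvmx_bootmem_desc_get(oct, addr,                            \
+ *         offsetof(struct cvmx_bootmem_named_block_desc, field),    \
+ *         sizeof(((struct cvmx_bootmem_named_block_desc *)0)->field))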
+ */ +struct cvmx_bootmem_named_block_desc { + /** Base address of named block */ + u64 base_addr; + + /** Size actually allocated for named block */ + u64 size; + + /** name of named block */ + char name[CVMX_BOOTMEM_NAME_LEN]; +}; + +struct oct_fw_info { + u32 max_nic_ports; /** max nic ports for the device */ + u32 num_gmx_ports; /** num gmx ports */ + u64 app_cap_flags; /** firmware cap flags */ + + /** The core application is running in this mode. + * See octeon-drv-opcodes.h for values. + */ + u32 app_mode; + char liquidio_firmware_version[32]; +}; + +/* wrappers around work structs */ +struct cavium_wk { + struct delayed_work work; + void *ctxptr; + size_t ctxul; +}; + +struct cavium_wq { + struct workqueue_struct *wq; + struct cavium_wk wk; +}; + +struct octdev_props { + /* Each interface in the Octeon device has a network + * device pointer (used for OS specific calls). + */ + struct net_device *netdev; +}; + +/** The Octeon device. + * Each Octeon device has this structure to represent all its + * components. + */ +struct octeon_device { + /** Lock for PCI window configuration accesses */ + spinlock_t pci_win_lock; + + /** Lock for memory accesses */ + spinlock_t mem_access_lock; + + /** PCI device pointer */ + struct pci_dev *pci_dev; + + /** Chip specific information. */ + void *chip; + + /** Number of interfaces detected in this octeon device. */ + u32 ifcount; + + struct octdev_props props[MAX_OCTEON_LINKS]; + + /** Octeon Chip type. */ + u16 chip_id; + u16 rev_id; + + /** This device's id - set by the driver. */ + u32 octeon_id; + + /** This device's PCIe port used for traffic. */ + u16 pcie_port; + + u16 flags; +#define LIO_FLAG_MSI_ENABLED (u32)(1 << 1) +#define LIO_FLAG_MSIX_ENABLED (u32)(1 << 2) + + /** The state of this device */ + atomic_t status; + + /** memory mapped io range */ + struct octeon_mmio mmio[OCT_MEM_REGIONS]; + + struct octeon_reg_list reg_list; + + struct octeon_fn_list fn_list; + + struct octeon_board_info boardinfo; + + u32 num_iqs; + + /* The pool containing pre allocated buffers used for soft commands */ + struct octeon_sc_buffer_pool sc_buf_pool; + + /** The input instruction queues */ + struct octeon_instr_queue *instr_queue[MAX_OCTEON_INSTR_QUEUES]; + + /** The doubly-linked list of instruction response */ + struct octeon_response_list response_list[MAX_RESPONSE_LISTS]; + + u32 num_oqs; + + /** The DROQ output queues */ + struct octeon_droq *droq[MAX_OCTEON_OUTPUT_QUEUES]; + + struct octeon_io_enable io_qmask; + + /** List of dispatch functions */ + struct octeon_dispatch_list dispatch; + + /* Interrupt Moderation */ + struct oct_intrmod_cfg intrmod; + + u32 int_status; + + u64 droq_intr; + + /** Physical location of the cvmx_bootmem_desc_t in octeon memory */ + u64 bootmem_desc_addr; + + /** Placeholder memory for named blocks. + * Assumes single-threaded access + */ + struct cvmx_bootmem_named_block_desc bootmem_named_block_desc; + + /** Address of consoles descriptor */ + u64 console_desc_addr; + + /** Number of consoles available. 0 means they are inaccessible */ + u32 num_consoles; + + /* Console caches */ + struct octeon_console console[MAX_OCTEON_MAPS]; + + /* Coprocessor clock rate. */ + u64 coproc_clock_rate; + + /** The core application is running in this mode. See liquidio_common.h + * for values. + */ + u32 app_mode; + + struct oct_fw_info fw_info; + + /** The name given to this device. 
*/ + char device_name[32]; + + /** Application Context */ + void *app_ctx; + + struct cavium_wq dma_comp_wq; + + struct cavium_wq check_db_wq[MAX_OCTEON_INSTR_QUEUES]; + + struct cavium_wk nic_poll_work; + + struct cavium_wk console_poll_work[MAX_OCTEON_MAPS]; + + void *priv; +}; + +#define OCTEON_CN6XXX(oct) ((oct->chip_id == OCTEON_CN66XX) || \ + (oct->chip_id == OCTEON_CN68XX)) +#define CHIP_FIELD(oct, TYPE, field) \ + (((struct octeon_ ## TYPE *)(oct->chip))->field) + +struct oct_intrmod_cmd { + struct octeon_device *oct_dev; + struct octeon_soft_command *sc; + struct oct_intrmod_cfg *cfg; +}; + +/*------------------ Function Prototypes ----------------------*/ + +/** Initialize device list memory */ +void octeon_init_device_list(int conf_type); + +/** Free memory for Input and Output queue structures for a octeon device */ +void octeon_free_device_mem(struct octeon_device *); + +/* Look up a free entry in the octeon_device table and allocate resources + * for the octeon_device structure for an octeon device. Called at init + * time. + */ +struct octeon_device *octeon_allocate_device(u32 pci_id, + u32 priv_size); + +/** Initialize the driver's dispatch list which is a mix of a hash table + * and a linked list. This is done at driver load time. + * @param octeon_dev - pointer to the octeon device structure. + * @return 0 on success, else -ve error value + */ +int octeon_init_dispatch_list(struct octeon_device *octeon_dev); + +/** Delete the driver's dispatch list and all registered entries. + * This is done at driver unload time. + * @param octeon_dev - pointer to the octeon device structure. + */ +void octeon_delete_dispatch_list(struct octeon_device *octeon_dev); + +/** Initialize the core device fields with the info returned by the FW. + * @param recv_info - Receive info structure + * @param buf - Receive buffer + */ +int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf); + +/** Gets the dispatch function registered to receive packets with a + * given opcode/subcode. + * @param octeon_dev - the octeon device pointer. + * @param opcode - the opcode for which the dispatch function + * is to checked. + * @param subcode - the subcode for which the dispatch function + * is to checked. + * + * @return Success: octeon_dispatch_fn_t (dispatch function pointer) + * @return Failure: NULL + * + * Looks up the dispatch list to get the dispatch function for a + * given opcode. + */ +octeon_dispatch_fn_t +octeon_get_dispatch(struct octeon_device *octeon_dev, u16 opcode, + u16 subcode); + +/** Get the octeon device pointer. + * @param octeon_id - The id for which the octeon device pointer is required. + * @return Success: Octeon device pointer. + * @return Failure: NULL. + */ +struct octeon_device *lio_get_device(u32 octeon_id); + +/** Get the octeon id assigned to the octeon device passed as argument. + * This function is exported to other modules. + * @param dev - octeon device pointer passed as a void *. + * @return octeon device id + */ +int lio_get_device_id(void *dev); + +static inline u16 OCTEON_MAJOR_REV(struct octeon_device *oct) +{ + u16 rev = (oct->rev_id & 0xC) >> 2; + + return (rev == 0) ? 1 : rev; +} + +static inline u16 OCTEON_MINOR_REV(struct octeon_device *oct) +{ + return oct->rev_id & 0x3; +} + +/** Read windowed register. + * @param oct - pointer to the Octeon device. + * @param addr - Address of the register to read. 
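+ *
+ * The window protocol, as implemented in lio_pci_readq():
+ *
+ *   writel(addr >> 32, pci_win_rd_addr_hi);   upper half written first
+ *   readl(pci_win_rd_addr_hi);                read back to keep ordering
+ *   writel(addr & 0xffffffff, pci_win_rd_addr_lo);  LSB write triggers
+ *   val64 = readq(pci_win_rd_data);           fetch the 64-bit result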
+ * + * This routine is called to read from the indirectly accessed + * Octeon registers that are visible through a PCI BAR0 mapped window + * register. + * @return - 64 bit value read from the register. + */ + +u64 lio_pci_readq(struct octeon_device *oct, u64 addr); + +/** Write windowed register. + * @param oct - pointer to the Octeon device. + * @param val - Value to write + * @param addr - Address of the register to write + * + * This routine is called to write to the indirectly accessed + * Octeon registers that are visible through a PCI BAR0 mapped window + * register. + * @return Nothing. + */ +void lio_pci_writeq(struct octeon_device *oct, u64 val, u64 addr); + +/* Routines for reading and writing CSRs */ +#define octeon_write_csr(oct_dev, reg_off, value) \ + writel(value, oct_dev->mmio[0].hw_addr + reg_off) + +#define octeon_write_csr64(oct_dev, reg_off, val64) \ + writeq(val64, oct_dev->mmio[0].hw_addr + reg_off) + +#define octeon_read_csr(oct_dev, reg_off) \ + readl(oct_dev->mmio[0].hw_addr + reg_off) + +#define octeon_read_csr64(oct_dev, reg_off) \ + readq(oct_dev->mmio[0].hw_addr + reg_off) + +/** + * Checks if memory access is okay + * + * @param oct which octeon to send to + * @return Zero on success, negative on failure. + */ +int octeon_mem_access_ok(struct octeon_device *oct); + +/** + * Waits for DDR initialization. + * + * @param oct which octeon to send to + * @param timeout_in_ms pointer to how long to wait until DDR is initialized + * in ms. + * If contents are 0, it waits until contents are non-zero + * before starting to check. + * @return Zero on success, negative on failure. + */ +int octeon_wait_for_ddr_init(struct octeon_device *oct, + u32 *timeout_in_ms); + +/** + * Wait for u-boot to boot and be waiting for a command. + * + * @param wait_time_hundredths + * Maximum time to wait + * + * @return Zero on success, negative on failure. + */ +int octeon_wait_for_bootloader(struct octeon_device *oct, + u32 wait_time_hundredths); + +/** + * Initialize console access + * + * @param oct which octeon initialize + * @return Zero on success, negative on failure. + */ +int octeon_init_consoles(struct octeon_device *oct); + +/** + * Adds access to a console to the device. + * + * @param oct which octeon to add to + * @param console_num which console + * @return Zero on success, negative on failure. + */ +int octeon_add_console(struct octeon_device *oct, u32 console_num); + +/** write or read from a console */ +int octeon_console_write(struct octeon_device *oct, u32 console_num, + char *buffer, u32 write_request_size, u32 flags); +int octeon_console_write_avail(struct octeon_device *oct, u32 console_num); +int octeon_console_read(struct octeon_device *oct, u32 console_num, + char *buffer, u32 buf_size, u32 flags); +int octeon_console_read_avail(struct octeon_device *oct, u32 console_num); + +/** Removes all attached consoles. */ +void octeon_remove_consoles(struct octeon_device *oct); + +/** + * Send a string to u-boot on console 0 as a command. + * + * @param oct which octeon to send to + * @param cmd_str String to send + * @param wait_hundredths Time to wait for u-boot to accept the command. + * + * @return Zero on success, negative on failure. + */ +int octeon_console_send_cmd(struct octeon_device *oct, char *cmd_str, + u32 wait_hundredths); + +/** Parses, validates, and downloads firmware, then boots associated cores. 
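+ *
+ * A caller would typically obtain the image with request_firmware()
+ * first; a minimal sketch (fw_name is a placeholder, error handling
+ * trimmed):
+ *
+ *	const struct firmware *fw;
+ *
+ *	if (!request_firmware(&fw, fw_name, &oct->pci_dev->dev)) {
+ *		octeon_download_firmware(oct, fw->data, fw->size);
+ *		release_firmware(fw);
+ *	}
+ *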
+ * @param oct which octeon to download firmware to + * @param data - The complete firmware file image + * @param size - The size of the data + * + * @return 0 if success. + * -EINVAL if file is incompatible or badly formatted. + * -ENODEV if no handler was found for the application type or an + * invalid octeon id was passed. + */ +int octeon_download_firmware(struct octeon_device *oct, const u8 *data, + size_t size); + +char *lio_get_state_string(atomic_t *state_ptr); + +/** Sets up instruction queues for the device + * @param oct which octeon to setup + * + * @return 0 if success. 1 if fails + */ +int octeon_setup_instr_queues(struct octeon_device *oct); + +/** Sets up output queues for the device + * @param oct which octeon to setup + * + * @return 0 if success. 1 if fails + */ +int octeon_setup_output_queues(struct octeon_device *oct); + +int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no); + +int octeon_get_rx_qsize(struct octeon_device *oct, u32 q_no); + +/** Turns off the input and output queues for the device + * @param oct which octeon to disable + */ +void octeon_set_io_queues_off(struct octeon_device *oct); + +/** Turns on or off the given output queue for the device + * @param oct which octeon to change + * @param q_no which queue + * @param enable 1 to enable, 0 to disable + */ +void octeon_set_droq_pkt_op(struct octeon_device *oct, u32 q_no, u32 enable); + +/** Retrieve the config for the device + * @param oct which octeon + * @param card_type type of card + * + * @returns pointer to configuration + */ +void *oct_get_config_info(struct octeon_device *oct, u16 card_type); + +/** Gets the octeon device configuration + * @return - pointer to the octeon configuration struture + */ +struct octeon_config *octeon_get_conf(struct octeon_device *oct); + +#endif diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c new file mode 100644 index 000000000000..60a186f1609b --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c @@ -0,0 +1,988 @@ +/********************************************************************** +* Author: Cavium, Inc. +* +* Contact: support@cavium.com +* Please include "LiquidIO" in the subject. +* +* Copyright (c) 2003-2015 Cavium, Inc. +* +* This file is free software; you can redistribute it and/or modify +* it under the terms of the GNU General Public License, Version 2, as +* published by the Free Software Foundation. +* +* This file is distributed in the hope that it will be useful, but +* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty +* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or +* NONINFRINGEMENT. See the GNU General Public License for more +* details. +* +* This file may also be available under a different license from Cavium. +* Contact Cavium, Inc. 
for more information +**********************************************************************/ +#include <linux/version.h> +#include <linux/types.h> +#include <linux/list.h> +#include <linux/pci.h> +#include <linux/kthread.h> +#include <linux/netdevice.h> +#include "octeon_config.h" +#include "liquidio_common.h" +#include "octeon_droq.h" +#include "octeon_iq.h" +#include "response_manager.h" +#include "octeon_device.h" +#include "octeon_nic.h" +#include "octeon_main.h" +#include "octeon_network.h" +#include "cn66xx_regs.h" +#include "cn66xx_device.h" +#include "cn68xx_regs.h" +#include "cn68xx_device.h" +#include "liquidio_image.h" +#include "octeon_mem_ops.h" + +/* #define CAVIUM_ONLY_PERF_MODE */ + +#define CVM_MIN(d1, d2) (((d1) < (d2)) ? (d1) : (d2)) +#define CVM_MAX(d1, d2) (((d1) > (d2)) ? (d1) : (d2)) + +struct niclist { + struct list_head list; + void *ptr; +}; + +struct __dispatch { + struct list_head list; + struct octeon_recv_info *rinfo; + octeon_dispatch_fn_t disp_fn; +}; + +/** Get the argument that the user set when registering dispatch + * function for a given opcode/subcode. + * @param octeon_dev - the octeon device pointer. + * @param opcode - the opcode for which the dispatch argument + * is to be checked. + * @param subcode - the subcode for which the dispatch argument + * is to be checked. + * @return Success: void * (argument to the dispatch function) + * @return Failure: NULL + * + */ +static inline void *octeon_get_dispatch_arg(struct octeon_device *octeon_dev, + u16 opcode, u16 subcode) +{ + int idx; + struct list_head *dispatch; + void *fn_arg = NULL; + u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode); + + idx = combined_opcode & OCTEON_OPCODE_MASK; + + spin_lock_bh(&octeon_dev->dispatch.lock); + + if (octeon_dev->dispatch.count == 0) { + spin_unlock_bh(&octeon_dev->dispatch.lock); + return NULL; + } + + if (octeon_dev->dispatch.dlist[idx].opcode == combined_opcode) { + fn_arg = octeon_dev->dispatch.dlist[idx].arg; + } else { + list_for_each(dispatch, + &octeon_dev->dispatch.dlist[idx].list) { + if (((struct octeon_dispatch *)dispatch)->opcode == + combined_opcode) { + fn_arg = ((struct octeon_dispatch *) + dispatch)->arg; + break; + } + } + } + + spin_unlock_bh(&octeon_dev->dispatch.lock); + return fn_arg; +} + +u32 octeon_droq_check_hw_for_pkts(struct octeon_device *oct, + struct octeon_droq *droq) +{ + u32 pkt_count = 0; + + pkt_count = readl(droq->pkts_sent_reg); + if (pkt_count) { + atomic_add(pkt_count, &droq->pkts_pending); + writel(pkt_count, droq->pkts_sent_reg); + } + + return pkt_count; +} + +static void octeon_droq_compute_max_packet_bufs(struct octeon_droq *droq) +{ + u32 count = 0; + + /* max_empty_descs is the max. no. of descs that can have no buffers. 
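+	 * (Worked example, assuming 2048-byte buffers: the loop below counts
+	 * 32 descriptors to cover 64K, so a ring of 128 descriptors would
+	 * end up with max_empty_descs = 128 - 32 = 96.)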
+ * If the empty desc count goes beyond this value, we cannot safely + * read in a 64K packet sent by Octeon + * (64K is max pkt size from Octeon) + */ + droq->max_empty_descs = 0; + + do { + droq->max_empty_descs++; + count += droq->buffer_size; + } while (count < (64 * 1024)); + + droq->max_empty_descs = droq->max_count - droq->max_empty_descs; +} + +static void octeon_droq_reset_indices(struct octeon_droq *droq) +{ + droq->read_idx = 0; + droq->write_idx = 0; + droq->refill_idx = 0; + droq->refill_count = 0; + atomic_set(&droq->pkts_pending, 0); +} + +static void +octeon_droq_destroy_ring_buffers(struct octeon_device *oct, + struct octeon_droq *droq) +{ + u32 i; + + for (i = 0; i < droq->max_count; i++) { + if (droq->recv_buf_list[i].buffer) { + if (droq->desc_ring) { + lio_unmap_ring_info(oct->pci_dev, + (u64)droq-> + desc_ring[i].info_ptr, + OCT_DROQ_INFO_SIZE); + lio_unmap_ring(oct->pci_dev, + (u64)droq->desc_ring[i]. + buffer_ptr, + droq->buffer_size); + } + recv_buffer_free(droq->recv_buf_list[i].buffer); + droq->recv_buf_list[i].buffer = NULL; + } + } + + octeon_droq_reset_indices(droq); +} + +static int +octeon_droq_setup_ring_buffers(struct octeon_device *oct, + struct octeon_droq *droq) +{ + u32 i; + void *buf; + struct octeon_droq_desc *desc_ring = droq->desc_ring; + + for (i = 0; i < droq->max_count; i++) { + buf = recv_buffer_alloc(oct, droq->q_no, droq->buffer_size); + + if (!buf) { + dev_err(&oct->pci_dev->dev, "%s buffer alloc failed\n", + __func__); + return -ENOMEM; + } + + droq->recv_buf_list[i].buffer = buf; + droq->recv_buf_list[i].data = get_rbd(buf); + + droq->info_list[i].length = 0; + + /* map ring buffers into memory */ + desc_ring[i].info_ptr = lio_map_ring_info(droq, i); + desc_ring[i].buffer_ptr = + lio_map_ring(oct->pci_dev, + droq->recv_buf_list[i].buffer, + droq->buffer_size); + } + + octeon_droq_reset_indices(droq); + + octeon_droq_compute_max_packet_bufs(droq); + + return 0; +} + +int octeon_delete_droq(struct octeon_device *oct, u32 q_no) +{ + struct octeon_droq *droq = oct->droq[q_no]; + + dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no); + + octeon_droq_destroy_ring_buffers(oct, droq); + + if (droq->recv_buf_list) + vfree(droq->recv_buf_list); + + if (droq->info_base_addr) + cnnic_free_aligned_dma(oct->pci_dev, droq->info_list, + droq->info_alloc_size, + droq->info_base_addr, + droq->info_list_dma); + + if (droq->desc_ring) + lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE), + droq->desc_ring, droq->desc_ring_dma); + + memset(droq, 0, OCT_DROQ_SIZE); + + return 0; +} + +int octeon_init_droq(struct octeon_device *oct, + u32 q_no, + u32 num_descs, + u32 desc_size, + void *app_ctx) +{ + struct octeon_droq *droq; + u32 desc_ring_size = 0, c_num_descs = 0, c_buf_size = 0; + u32 c_pkts_per_intr = 0, c_refill_threshold = 0; + + dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no); + + droq = oct->droq[q_no]; + memset(droq, 0, OCT_DROQ_SIZE); + + droq->oct_dev = oct; + droq->q_no = q_no; + if (app_ctx) + droq->app_ctx = app_ctx; + else + droq->app_ctx = (void *)(size_t)q_no; + + c_num_descs = num_descs; + c_buf_size = desc_size; + if (OCTEON_CN6XXX(oct)) { + struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf); + + c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf6x); + c_refill_threshold = (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf6x); + } + + droq->max_count = c_num_descs; + droq->buffer_size = c_buf_size; + + desc_ring_size = droq->max_count * OCT_DROQ_DESC_SIZE; + droq->desc_ring = lio_dma_alloc(oct, desc_ring_size, + (dma_addr_t 
*)&droq->desc_ring_dma); + + if (!droq->desc_ring) { + dev_err(&oct->pci_dev->dev, + "Output queue %d ring alloc failed\n", q_no); + return 1; + } + + dev_dbg(&oct->pci_dev->dev, "droq[%d]: desc_ring: virt: 0x%p, dma: %lx\n", + q_no, droq->desc_ring, droq->desc_ring_dma); + dev_dbg(&oct->pci_dev->dev, "droq[%d]: num_desc: %d\n", q_no, + droq->max_count); + + droq->info_list = + cnnic_alloc_aligned_dma(oct->pci_dev, + (droq->max_count * OCT_DROQ_INFO_SIZE), + &droq->info_alloc_size, + &droq->info_base_addr, + &droq->info_list_dma); + + if (!droq->info_list) { + dev_err(&oct->pci_dev->dev, "Cannot allocate memory for info list.\n"); + lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE), + droq->desc_ring, droq->desc_ring_dma); + return 1; + } + + droq->recv_buf_list = (struct octeon_recv_buffer *) + vmalloc(droq->max_count * + OCT_DROQ_RECVBUF_SIZE); + if (!droq->recv_buf_list) { + dev_err(&oct->pci_dev->dev, "Output queue recv buf list alloc failed\n"); + goto init_droq_fail; + } + + if (octeon_droq_setup_ring_buffers(oct, droq)) + goto init_droq_fail; + + droq->pkts_per_intr = c_pkts_per_intr; + droq->refill_threshold = c_refill_threshold; + + dev_dbg(&oct->pci_dev->dev, "DROQ INIT: max_empty_descs: %d\n", + droq->max_empty_descs); + + spin_lock_init(&droq->lock); + + INIT_LIST_HEAD(&droq->dispatch_list); + + /* For 56xx Pass1, this function won't be called, so no checks. */ + oct->fn_list.setup_oq_regs(oct, q_no); + + oct->io_qmask.oq |= (1 << q_no); + + return 0; + +init_droq_fail: + octeon_delete_droq(oct, q_no); + return 1; +} + +/* octeon_create_recv_info + * Parameters: + * octeon_dev - pointer to the octeon device structure + * droq - droq in which the packet arrived. + * buf_cnt - no. of buffers used by the packet. + * idx - index in the descriptor for the first buffer in the packet. + * Description: + * Allocates a recv_info_t and copies the buffer addresses for packet data + * into the recv_pkt space which starts at an 8B offset from recv_info_t. + * Flags the descriptors for refill later. If available descriptors go + * below the threshold to receive a 64K pkt, new buffers are first allocated + * before the recv_pkt_t is created. + * This routine will be called in interrupt context. + * Returns: + * Success: Pointer to recv_info_t + * Failure: NULL. + * Locks: + * The droq->lock is held when this routine is called. + */ +static inline struct octeon_recv_info *octeon_create_recv_info( + struct octeon_device *octeon_dev, + struct octeon_droq *droq, + u32 buf_cnt, + u32 idx) +{ + struct octeon_droq_info *info; + struct octeon_recv_pkt *recv_pkt; + struct octeon_recv_info *recv_info; + u32 i, bytes_left; + + info = &droq->info_list[idx]; + + recv_info = octeon_alloc_recv_info(sizeof(struct __dispatch)); + if (!recv_info) + return NULL; + + recv_pkt = recv_info->recv_pkt; + recv_pkt->rh = info->rh; + recv_pkt->length = (u32)info->length; + recv_pkt->buffer_count = (u16)buf_cnt; + recv_pkt->octeon_id = (u16)octeon_dev->octeon_id; + + i = 0; + bytes_left = (u32)info->length; + + while (buf_cnt) { + lio_unmap_ring(octeon_dev->pci_dev, + (u64)droq->desc_ring[idx].buffer_ptr, + droq->buffer_size); + + recv_pkt->buffer_size[i] = + (bytes_left >= + droq->buffer_size) ? 
droq->buffer_size : bytes_left;
+
+		recv_pkt->buffer_ptr[i] = droq->recv_buf_list[idx].buffer;
+		droq->recv_buf_list[idx].buffer = NULL;
+
+		INCR_INDEX_BY1(idx, droq->max_count);
+		bytes_left -= droq->buffer_size;
+		i++;
+		buf_cnt--;
+	}
+
+	return recv_info;
+}
+
+/* If we were not able to refill all buffers, try to move around
+ * the buffers that were not dispatched.
+ */
+static inline u32
+octeon_droq_refill_pullup_descs(struct octeon_droq *droq,
+				struct octeon_droq_desc *desc_ring)
+{
+	u32 desc_refilled = 0;
+	u32 refill_index = droq->refill_idx;
+
+	while (refill_index != droq->read_idx) {
+		if (droq->recv_buf_list[refill_index].buffer) {
+			droq->recv_buf_list[droq->refill_idx].buffer =
+				droq->recv_buf_list[refill_index].buffer;
+			droq->recv_buf_list[droq->refill_idx].data =
+				droq->recv_buf_list[refill_index].data;
+			desc_ring[droq->refill_idx].buffer_ptr =
+				desc_ring[refill_index].buffer_ptr;
+			droq->recv_buf_list[refill_index].buffer = NULL;
+			desc_ring[refill_index].buffer_ptr = 0;
+			do {
+				INCR_INDEX_BY1(droq->refill_idx,
+					       droq->max_count);
+				desc_refilled++;
+				droq->refill_count--;
+			} while (droq->recv_buf_list[droq->refill_idx].
+				 buffer);
+		}
+		INCR_INDEX_BY1(refill_index, droq->max_count);
+	}			/* while */
+	return desc_refilled;
+}
+
+/* octeon_droq_refill
+ * Parameters:
+ *  droq       - droq in which descriptors require new buffers.
+ * Description:
+ *  Called during normal DROQ processing in interrupt mode or by the poll
+ *  thread to refill the descriptors from which buffers were dispatched
+ *  to upper layers. Attempts to allocate new buffers. If that fails, moves
+ *  up buffers (that were not dispatched) to form a contiguous ring.
+ * Returns:
+ *  No. of descriptors refilled.
+ * Locks:
+ *  This routine is called with droq->lock held.
+ */
+static u32
+octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq)
+{
+	struct octeon_droq_desc *desc_ring;
+	void *buf = NULL;
+	u8 *data;
+	u32 desc_refilled = 0;
+
+	desc_ring = droq->desc_ring;
+
+	while (droq->refill_count && (desc_refilled < droq->max_count)) {
+		/* If a valid buffer exists (happens if there is no dispatch),
+		 * reuse the buffer; else allocate.
+		 */
+		if (!droq->recv_buf_list[droq->refill_idx].buffer) {
+			buf = recv_buffer_alloc(octeon_dev, droq->q_no,
+						droq->buffer_size);
+			/* If a buffer could not be allocated, there is
+			 * no point in continuing.
+			 */
+			if (!buf)
+				break;
+			droq->recv_buf_list[droq->refill_idx].buffer = buf;
+			data = get_rbd(buf);
+		} else {
+			data = get_rbd(droq->recv_buf_list
+				       [droq->refill_idx].buffer);
+		}
+
+		droq->recv_buf_list[droq->refill_idx].data = data;
+
+		desc_ring[droq->refill_idx].buffer_ptr =
+			lio_map_ring(octeon_dev->pci_dev,
+				     droq->recv_buf_list[droq->
				     refill_idx].buffer,
+				     droq->buffer_size);
+
+		/* Reset any previous values in the length field. */
+		droq->info_list[droq->refill_idx].length = 0;
+
+		INCR_INDEX_BY1(droq->refill_idx, droq->max_count);
+		desc_refilled++;
+		droq->refill_count--;
+	}
+
+	if (droq->refill_count)
+		desc_refilled +=
+			octeon_droq_refill_pullup_descs(droq, desc_ring);
+
+	/* The refill count does not change in the pull-up pass above: it only
+	 * moves buffers to close the gap in the ring, so the same number of
+	 * buffers still remains to be refilled.
+ */ + return desc_refilled; +} + +static inline u32 +octeon_droq_get_bufcount(u32 buf_size, u32 total_len) +{ + u32 buf_cnt = 0; + + while (total_len > (buf_size * buf_cnt)) + buf_cnt++; + return buf_cnt; +} + +static int +octeon_droq_dispatch_pkt(struct octeon_device *oct, + struct octeon_droq *droq, + union octeon_rh *rh, + struct octeon_droq_info *info) +{ + u32 cnt; + octeon_dispatch_fn_t disp_fn; + struct octeon_recv_info *rinfo; + + cnt = octeon_droq_get_bufcount(droq->buffer_size, (u32)info->length); + + disp_fn = octeon_get_dispatch(oct, (u16)rh->r.opcode, + (u16)rh->r.subcode); + if (disp_fn) { + rinfo = octeon_create_recv_info(oct, droq, cnt, droq->read_idx); + if (rinfo) { + struct __dispatch *rdisp = rinfo->rsvd; + + rdisp->rinfo = rinfo; + rdisp->disp_fn = disp_fn; + rinfo->recv_pkt->rh = *rh; + list_add_tail(&rdisp->list, + &droq->dispatch_list); + } else { + droq->stats.dropped_nomem++; + } + } else { + dev_err(&oct->pci_dev->dev, "DROQ: No dispatch function\n"); + droq->stats.dropped_nodispatch++; + } /* else (dispatch_fn ... */ + + return cnt; +} + +static inline void octeon_droq_drop_packets(struct octeon_device *oct, + struct octeon_droq *droq, + u32 cnt) +{ + u32 i = 0, buf_cnt; + struct octeon_droq_info *info; + + for (i = 0; i < cnt; i++) { + info = &droq->info_list[droq->read_idx]; + octeon_swap_8B_data((u64 *)info, 2); + + if (info->length) { + info->length -= OCT_RH_SIZE; + droq->stats.bytes_received += info->length; + buf_cnt = octeon_droq_get_bufcount(droq->buffer_size, + (u32)info->length); + } else { + dev_err(&oct->pci_dev->dev, "DROQ: In drop: pkt with len 0\n"); + buf_cnt = 1; + } + + INCR_INDEX(droq->read_idx, buf_cnt, droq->max_count); + droq->refill_count += buf_cnt; + } +} + +static u32 +octeon_droq_fast_process_packets(struct octeon_device *oct, + struct octeon_droq *droq, + u32 pkts_to_process) +{ + struct octeon_droq_info *info; + union octeon_rh *rh; + u32 pkt, total_len = 0, pkt_count; + + pkt_count = pkts_to_process; + + for (pkt = 0; pkt < pkt_count; pkt++) { + u32 pkt_len = 0; + struct sk_buff *nicbuf = NULL; + + info = &droq->info_list[droq->read_idx]; + octeon_swap_8B_data((u64 *)info, 2); + + if (!info->length) { + dev_err(&oct->pci_dev->dev, + "DROQ[%d] idx: %d len:0, pkt_cnt: %d\n", + droq->q_no, droq->read_idx, pkt_count); + print_hex_dump_bytes("", DUMP_PREFIX_ADDRESS, + (u8 *)info, + OCT_DROQ_INFO_SIZE); + break; + } + + /* Len of resp hdr in included in the received data len. */ + info->length -= OCT_RH_SIZE; + rh = &info->rh; + + total_len += (u32)info->length; + + if (OPCODE_SLOW_PATH(rh)) { + u32 buf_cnt; + + buf_cnt = octeon_droq_dispatch_pkt(oct, droq, rh, info); + INCR_INDEX(droq->read_idx, buf_cnt, droq->max_count); + droq->refill_count += buf_cnt; + } else { + if (info->length <= droq->buffer_size) { + lio_unmap_ring(oct->pci_dev, + (u64)droq->desc_ring[ + droq->read_idx].buffer_ptr, + droq->buffer_size); + pkt_len = (u32)info->length; + nicbuf = droq->recv_buf_list[ + droq->read_idx].buffer; + droq->recv_buf_list[droq->read_idx].buffer = + NULL; + INCR_INDEX_BY1(droq->read_idx, droq->max_count); + skb_put(nicbuf, pkt_len); + droq->refill_count++; + } else { + nicbuf = octeon_fast_packet_alloc(oct, droq, + droq->q_no, + (u32) + info->length); + pkt_len = 0; + /* nicbuf allocation can fail. We'll handle it + * inside the loop. + */ + while (pkt_len < info->length) { + int cpy_len; + + cpy_len = ((pkt_len + + droq->buffer_size) > + info->length) ? 
+ ((u32)info->length - pkt_len) : + droq->buffer_size; + + if (nicbuf) { + lio_unmap_ring(oct->pci_dev, + (u64) + droq->desc_ring + [droq->read_idx]. + buffer_ptr, + droq-> + buffer_size); + octeon_fast_packet_next(droq, + nicbuf, + cpy_len, + droq-> + read_idx + ); + } + + pkt_len += cpy_len; + INCR_INDEX_BY1(droq->read_idx, + droq->max_count); + droq->refill_count++; + } + } + + if (nicbuf) { + if (droq->ops.fptr) + droq->ops.fptr(oct->octeon_id, + nicbuf, pkt_len, + rh, &droq->napi); + else + recv_buffer_free(nicbuf); + } + } + + if (droq->refill_count >= droq->refill_threshold) { + int desc_refilled = octeon_droq_refill(oct, droq); + + /* Flush the droq descriptor data to memory to be sure + * that when we update the credits the data in memory + * is accurate. + */ + wmb(); + writel((desc_refilled), droq->pkts_credit_reg); + /* make sure mmio write completes */ + mmiowb(); + } + + } /* for ( each packet )... */ + + /* Increment refill_count by the number of buffers processed. */ + droq->stats.pkts_received += pkt; + droq->stats.bytes_received += total_len; + + if ((droq->ops.drop_on_max) && (pkts_to_process - pkt)) { + octeon_droq_drop_packets(oct, droq, (pkts_to_process - pkt)); + + droq->stats.dropped_toomany += (pkts_to_process - pkt); + return pkts_to_process; + } + + return pkt; +} + +int +octeon_droq_process_packets(struct octeon_device *oct, + struct octeon_droq *droq, + u32 budget) +{ + u32 pkt_count = 0, pkts_processed = 0; + struct list_head *tmp, *tmp2; + + pkt_count = atomic_read(&droq->pkts_pending); + if (!pkt_count) + return 0; + + if (pkt_count > budget) + pkt_count = budget; + + /* Grab the lock */ + spin_lock(&droq->lock); + + pkts_processed = octeon_droq_fast_process_packets(oct, droq, pkt_count); + + atomic_sub(pkts_processed, &droq->pkts_pending); + + /* Release the spin lock */ + spin_unlock(&droq->lock); + + list_for_each_safe(tmp, tmp2, &droq->dispatch_list) { + struct __dispatch *rdisp = (struct __dispatch *)tmp; + + list_del(tmp); + rdisp->disp_fn(rdisp->rinfo, + octeon_get_dispatch_arg + (oct, + (u16)rdisp->rinfo->recv_pkt->rh.r.opcode, + (u16)rdisp->rinfo->recv_pkt->rh.r.subcode)); + } + + /* If there are packets pending. schedule tasklet again */ + if (atomic_read(&droq->pkts_pending)) + return 1; + + return 0; +} + +/** + * Utility function to poll for packets. check_hw_for_packets must be + * called before calling this routine. 
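+ *
+ * A poll-mode caller would typically pair the two calls like this
+ * (sketch only; the driver's real NAPI path adds more bookkeeping):
+ *
+ *	octeon_droq_check_hw_for_pkts(oct, droq);
+ *	work_done = octeon_process_droq_poll_cmd(oct, droq->q_no,
+ *						 POLL_EVENT_PROCESS_PKTS,
+ *						 budget);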
+ */ + +static int +octeon_droq_process_poll_pkts(struct octeon_device *oct, + struct octeon_droq *droq, u32 budget) +{ + struct list_head *tmp, *tmp2; + u32 pkts_available = 0, pkts_processed = 0; + u32 total_pkts_processed = 0; + + if (budget > droq->max_count) + budget = droq->max_count; + + spin_lock(&droq->lock); + + while (total_pkts_processed < budget) { + pkts_available = + CVM_MIN((budget - total_pkts_processed), + (u32)(atomic_read(&droq->pkts_pending))); + + if (pkts_available == 0) + break; + + pkts_processed = + octeon_droq_fast_process_packets(oct, droq, + pkts_available); + + atomic_sub(pkts_processed, &droq->pkts_pending); + + total_pkts_processed += pkts_processed; + + octeon_droq_check_hw_for_pkts(oct, droq); + } + + spin_unlock(&droq->lock); + + list_for_each_safe(tmp, tmp2, &droq->dispatch_list) { + struct __dispatch *rdisp = (struct __dispatch *)tmp; + + list_del(tmp); + rdisp->disp_fn(rdisp->rinfo, + octeon_get_dispatch_arg + (oct, + (u16)rdisp->rinfo->recv_pkt->rh.r.opcode, + (u16)rdisp->rinfo->recv_pkt->rh.r.subcode)); + } + + return total_pkts_processed; +} + +int +octeon_process_droq_poll_cmd(struct octeon_device *oct, u32 q_no, int cmd, + u32 arg) +{ + struct octeon_droq *droq; + struct octeon_config *oct_cfg = NULL; + + oct_cfg = octeon_get_conf(oct); + + if (!oct_cfg) + return -EINVAL; + + if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) { + dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n", + __func__, q_no, (oct->num_oqs - 1)); + return -EINVAL; + } + + droq = oct->droq[q_no]; + + if (cmd == POLL_EVENT_PROCESS_PKTS) + return octeon_droq_process_poll_pkts(oct, droq, arg); + + if (cmd == POLL_EVENT_PENDING_PKTS) { + u32 pkt_cnt = atomic_read(&droq->pkts_pending); + + return octeon_droq_process_packets(oct, droq, pkt_cnt); + } + + if (cmd == POLL_EVENT_ENABLE_INTR) { + u32 value; + unsigned long flags; + + /* Enable Pkt Interrupt */ + switch (oct->chip_id) { + case OCTEON_CN66XX: + case OCTEON_CN68XX: { + struct octeon_cn6xxx *cn6xxx = + (struct octeon_cn6xxx *)oct->chip; + spin_lock_irqsave + (&cn6xxx->lock_for_droq_int_enb_reg, flags); + value = + octeon_read_csr(oct, + CN6XXX_SLI_PKT_TIME_INT_ENB); + value |= (1 << q_no); + octeon_write_csr(oct, + CN6XXX_SLI_PKT_TIME_INT_ENB, + value); + value = + octeon_read_csr(oct, + CN6XXX_SLI_PKT_CNT_INT_ENB); + value |= (1 << q_no); + octeon_write_csr(oct, + CN6XXX_SLI_PKT_CNT_INT_ENB, + value); + + /* don't bother flushing the enables */ + + spin_unlock_irqrestore + (&cn6xxx->lock_for_droq_int_enb_reg, flags); + return 0; + } + break; + } + + return 0; + } + + dev_err(&oct->pci_dev->dev, "%s Unknown command: %d\n", __func__, cmd); + return -EINVAL; +} + +int octeon_register_droq_ops(struct octeon_device *oct, u32 q_no, + struct octeon_droq_ops *ops) +{ + struct octeon_droq *droq; + unsigned long flags; + struct octeon_config *oct_cfg = NULL; + + oct_cfg = octeon_get_conf(oct); + + if (!oct_cfg) + return -EINVAL; + + if (!(ops)) { + dev_err(&oct->pci_dev->dev, "%s: droq_ops pointer is NULL\n", + __func__); + return -EINVAL; + } + + if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) { + dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n", + __func__, q_no, (oct->num_oqs - 1)); + return -EINVAL; + } + + droq = oct->droq[q_no]; + + spin_lock_irqsave(&droq->lock, flags); + + memcpy(&droq->ops, ops, sizeof(struct octeon_droq_ops)); + + spin_unlock_irqrestore(&droq->lock, flags); + + return 0; +} + +int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no) +{ + unsigned long flags; + struct octeon_droq 
*droq;
+	struct octeon_config *oct_cfg = NULL;
+
+	oct_cfg = octeon_get_conf(oct);
+
+	if (!oct_cfg)
+		return -EINVAL;
+
+	if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) {
+		dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n",
+			__func__, q_no, oct->num_oqs - 1);
+		return -EINVAL;
+	}
+
+	droq = oct->droq[q_no];
+
+	if (!droq) {
+		dev_info(&oct->pci_dev->dev,
+			 "Droq id (%d) not available.\n", q_no);
+		return 0;
+	}
+
+	spin_lock_irqsave(&droq->lock, flags);
+
+	droq->ops.fptr = NULL;
+	droq->ops.drop_on_max = 0;
+
+	spin_unlock_irqrestore(&droq->lock, flags);
+
+	return 0;
+}
+
+int octeon_create_droq(struct octeon_device *oct,
+		       u32 q_no, u32 num_descs,
+		       u32 desc_size, void *app_ctx)
+{
+	struct octeon_droq *droq;
+
+	if (oct->droq[q_no]) {
+		dev_dbg(&oct->pci_dev->dev, "Droq already in use. Cannot create droq %d again\n",
+			q_no);
+		return 1;
+	}
+
+	/* Allocate the DS for the new droq. */
+	droq = vmalloc(sizeof(*droq));
+	if (!droq)
+		return 1;
+	memset(droq, 0, sizeof(struct octeon_droq));
+
+	/* Disable the pkt o/p for this Q */
+	octeon_set_droq_pkt_op(oct, q_no, 0);
+	oct->droq[q_no] = droq;
+
+	/* Initialize the Droq. On failure, octeon_init_droq() has already
+	 * released whatever it allocated internally; only the struct itself
+	 * remains to be freed here.
+	 */
+	if (octeon_init_droq(oct, q_no, num_descs, desc_size, app_ctx)) {
+		oct->droq[q_no] = NULL;
+		vfree(droq);
+		return 1;
+	}
+
+	oct->num_oqs++;
+
+	dev_dbg(&oct->pci_dev->dev, "%s: Total number of OQ: %d\n", __func__,
+		oct->num_oqs);
+
+	/* Global Droq register settings */
+
+	/* As of now not required, as settings are done for all 32 Droqs at
+	 * the same time.
+	 */
+	return 0;
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
new file mode 100644
index 000000000000..7940ccee12d9
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
@@ -0,0 +1,426 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ *          Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2015 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium, Inc. for more information
+ **********************************************************************/
+
+/*!  \file octeon_droq.h
+ *   \brief Implementation of Octeon Output queues. "Output" is with
+ *   respect to the Octeon device on the NIC. From this driver's point of
+ *   view they are ingress queues.
+ */
+
+#ifndef __OCTEON_DROQ_H__
+#define __OCTEON_DROQ_H__
+
+/* Default number of packets that will be processed in one iteration. */
+#define MAX_PACKET_BUDGET 0xFFFFFFFF
+
+/** Octeon descriptor format.
+ *  The descriptor ring is made of descriptors which have 2 64-bit values:
+ *  -# Physical (bus) address of the data buffer.
+ *  -# Physical (bus) address of an octeon_droq_info structure.
+ *  The Octeon device DMAs incoming packets and their information at the
+ *  address given by these descriptor fields.
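+ *
+ *  The host fills each ring entry roughly as follows (see
+ *  octeon_droq_setup_ring_buffers() in octeon_droq.c):
+ *
+ *	desc_ring[i].info_ptr   = lio_map_ring_info(droq, i);
+ *	desc_ring[i].buffer_ptr = lio_map_ring(oct->pci_dev,
+ *					       droq->recv_buf_list[i].buffer,
+ *					       droq->buffer_size);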
+ */ +struct octeon_droq_desc { + /** The buffer pointer */ + u64 buffer_ptr; + + /** The Info pointer */ + u64 info_ptr; +}; + +#define OCT_DROQ_DESC_SIZE (sizeof(struct octeon_droq_desc)) + +/** Information about packet DMA'ed by Octeon. + * The format of the information available at Info Pointer after Octeon + * has posted a packet. Not all descriptors have valid information. Only + * the Info field of the first descriptor for a packet has information + * about the packet. + */ +struct octeon_droq_info { + /** The Output Receive Header. */ + union octeon_rh rh; + + /** The Length of the packet. */ + u64 length; +}; + +#define OCT_DROQ_INFO_SIZE (sizeof(struct octeon_droq_info)) + +/** Pointer to data buffer. + * Driver keeps a pointer to the data buffer that it made available to + * the Octeon device. Since the descriptor ring keeps physical (bus) + * addresses, this field is required for the driver to keep track of + * the virtual address pointers. +*/ +struct octeon_recv_buffer { + /** Packet buffer, including metadata. */ + void *buffer; + + /** Data in the packet buffer. */ + u8 *data; +}; + +#define OCT_DROQ_RECVBUF_SIZE (sizeof(struct octeon_recv_buffer)) + +/** Output Queue statistics. Each output queue has four stats fields. */ +struct oct_droq_stats { + /** Number of packets received in this queue. */ + u64 pkts_received; + + /** Bytes received by this queue. */ + u64 bytes_received; + + /** Packets dropped due to no dispatch function. */ + u64 dropped_nodispatch; + + /** Packets dropped due to no memory available. */ + u64 dropped_nomem; + + /** Packets dropped due to large number of pkts to process. */ + u64 dropped_toomany; + + /** Number of packets sent to stack from this queue. */ + u64 rx_pkts_received; + + /** Number of Bytes sent to stack from this queue. */ + u64 rx_bytes_received; + + /** Num of Packets dropped due to receive path failures. */ + u64 rx_dropped; +}; + +#define POLL_EVENT_INTR_ARRIVED 1 +#define POLL_EVENT_PROCESS_PKTS 2 +#define POLL_EVENT_PENDING_PKTS 3 +#define POLL_EVENT_ENABLE_INTR 4 + +/* The maximum number of buffers that can be dispatched from the + * output/dma queue. Set to 64 assuming 1K buffers in DROQ and the fact that + * max packet size from DROQ is 64K. + */ +#define MAX_RECV_BUFS 64 + +/** Receive Packet format used when dispatching output queue packets + * with non-raw opcodes. + * The received packet will be sent to the upper layers using this + * structure which is passed as a parameter to the dispatch function + */ +struct octeon_recv_pkt { + /** Number of buffers in this received packet */ + u16 buffer_count; + + /** Id of the device that is sending the packet up */ + u16 octeon_id; + + /** Length of data in the packet buffer */ + u32 length; + + /** The receive header */ + union octeon_rh rh; + + /** Pointer to the OS-specific packet buffer */ + void *buffer_ptr[MAX_RECV_BUFS]; + + /** Size of the buffers pointed to by ptr's in buffer_ptr */ + u32 buffer_size[MAX_RECV_BUFS]; +}; + +#define OCT_RECV_PKT_SIZE (sizeof(struct octeon_recv_pkt)) + +/** The first parameter of a dispatch function. + * For a raw mode opcode, the driver dispatches with the device + * pointer in this structure. + * For non-raw mode opcode, the driver dispatches the recv_pkt + * created to contain the buffers with data received from Octeon. 
+ * --------------------- + * | *recv_pkt ----|--- + * |-------------------| | + * | 0 or more bytes | | + * | reserved by driver| | + * |-------------------|<-/ + * | octeon_recv_pkt | + * | | + * |___________________| + */ +struct octeon_recv_info { + void *rsvd; + struct octeon_recv_pkt *recv_pkt; +}; + +#define OCT_RECV_INFO_SIZE (sizeof(struct octeon_recv_info)) + +/** Allocate a recv_info structure. The recv_pkt pointer in the recv_info + * structure is filled in before this call returns. + * @param extra_bytes - extra bytes to be allocated at the end of the recv info + * structure. + * @return - pointer to a newly allocated recv_info structure. + */ +static inline struct octeon_recv_info *octeon_alloc_recv_info(int extra_bytes) +{ + struct octeon_recv_info *recv_info; + u8 *buf; + + buf = kmalloc(OCT_RECV_PKT_SIZE + OCT_RECV_INFO_SIZE + + extra_bytes, GFP_ATOMIC); + if (!buf) + return NULL; + + recv_info = (struct octeon_recv_info *)buf; + recv_info->recv_pkt = + (struct octeon_recv_pkt *)(buf + OCT_RECV_INFO_SIZE); + recv_info->rsvd = NULL; + if (extra_bytes) + recv_info->rsvd = buf + OCT_RECV_INFO_SIZE + OCT_RECV_PKT_SIZE; + + return recv_info; +} + +/** Free a recv_info structure. + * @param recv_info - Pointer to receive_info to be freed + */ +static inline void octeon_free_recv_info(struct octeon_recv_info *recv_info) +{ + kfree(recv_info); +} + +typedef int (*octeon_dispatch_fn_t)(struct octeon_recv_info *, void *); + +/** Used by NIC module to register packet handler and to get device + * information for each octeon device. + */ +struct octeon_droq_ops { + /** This registered function will be called by the driver with + * the octeon id, pointer to buffer from droq and length of + * data in the buffer. The receive header gives the port + * number to the caller. Function pointer is set by caller. + */ + void (*fptr)(u32, void *, u32, union octeon_rh *, void *); + + /* This function will be called by the driver for all NAPI related + * events. The first param is the octeon id. The second param is the + * output queue number. The third is the NAPI event that occurred. + */ + void (*napi_fn)(void *); + + u32 poll_mode; + + /** Flag indicating if the DROQ handler should drop packets that + * it cannot handle in one iteration. Set by caller. + */ + u32 drop_on_max; +}; + +/** The Descriptor Ring Output Queue structure. + * This structure has all the information required to implement a + * Octeon DROQ. + */ +struct octeon_droq { + /** A spinlock to protect access to this ring. */ + spinlock_t lock; + + u32 q_no; + + struct octeon_droq_ops ops; + + struct octeon_device *oct_dev; + + /** The 8B aligned descriptor ring starts at this address. */ + struct octeon_droq_desc *desc_ring; + + /** Index in the ring where the driver should read the next packet */ + u32 read_idx; + + /** Index in the ring where Octeon will write the next packet */ + u32 write_idx; + + /** Index in the ring where the driver will refill the descriptor's + * buffer + */ + u32 refill_idx; + + /** Packets pending to be processed */ + atomic_t pkts_pending; + + /** Number of descriptors in this ring. */ + u32 max_count; + + /** The number of descriptors pending refill. */ + u32 refill_count; + + u32 pkts_per_intr; + u32 refill_threshold; + + /** The max number of descriptors in DROQ without a buffer. + * This field is used to keep track of empty space threshold. If the + * refill_count reaches this value, the DROQ cannot accept a max-sized + * (64K) packet. 
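+	 * See octeon_droq_compute_max_packet_bufs() in octeon_droq.c for
+	 * how this value is derived from the ring and buffer sizes.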
+ */ + u32 max_empty_descs; + + /** The 8B aligned info ptrs begin from this address. */ + struct octeon_droq_info *info_list; + + /** The receive buffer list. This list has the virtual addresses of the + * buffers. + */ + struct octeon_recv_buffer *recv_buf_list; + + /** The size of each buffer pointed by the buffer pointer. */ + u32 buffer_size; + + /** Pointer to the mapped packet credit register. + * Host writes number of info/buffer ptrs available to this register + */ + void __iomem *pkts_credit_reg; + + /** Pointer to the mapped packet sent register. + * Octeon writes the number of packets DMA'ed to host memory + * in this register. + */ + void __iomem *pkts_sent_reg; + + struct list_head dispatch_list; + + /** Statistics for this DROQ. */ + struct oct_droq_stats stats; + + /** DMA mapped address of the DROQ descriptor ring. */ + size_t desc_ring_dma; + + /** Info ptr list are allocated at this virtual address. */ + size_t info_base_addr; + + /** DMA mapped address of the info list */ + size_t info_list_dma; + + /** Allocated size of info list. */ + u32 info_alloc_size; + + /** application context */ + void *app_ctx; + + struct napi_struct napi; + + u32 cpu_id; + + struct call_single_data csd; +}; + +#define OCT_DROQ_SIZE (sizeof(struct octeon_droq)) + +/** + * Allocates space for the descriptor ring for the droq and sets the + * base addr, num desc etc in Octeon registers. + * + * @param oct_dev - pointer to the octeon device structure + * @param q_no - droq no. ranges from 0 - 3. + * @param app_ctx - pointer to application context + * @return Success: 0 Failure: 1 +*/ +int octeon_init_droq(struct octeon_device *oct_dev, + u32 q_no, + u32 num_descs, + u32 desc_size, + void *app_ctx); + +/** + * Frees the space for descriptor ring for the droq. + * + * @param oct_dev - pointer to the octeon device structure + * @param q_no - droq no. ranges from 0 - 3. + * @return: Success: 0 Failure: 1 +*/ +int octeon_delete_droq(struct octeon_device *oct_dev, u32 q_no); + +/** Register a change in droq operations. The ops field has a pointer to a + * function which will called by the DROQ handler for all packets arriving + * on output queues given by q_no irrespective of the type of packet. + * The ops field also has a flag which if set tells the DROQ handler to + * drop packets if it receives more than what it can process in one + * invocation of the handler. + * @param oct - octeon device + * @param q_no - octeon output queue number (0 <= q_no <= MAX_OCTEON_DROQ-1 + * @param ops - the droq_ops settings for this queue + * @return - 0 on success, -ENODEV or -EINVAL on error. + */ +int +octeon_register_droq_ops(struct octeon_device *oct, + u32 q_no, + struct octeon_droq_ops *ops); + +/** Resets the function pointer and flag settings made by + * octeon_register_droq_ops(). After this routine is called, the DROQ handler + * will lookup dispatch function for each arriving packet on the output queue + * given by q_no. + * @param oct - octeon device + * @param q_no - octeon output queue number (0 <= q_no <= MAX_OCTEON_DROQ-1 + * @return - 0 on success, -ENODEV or -EINVAL on error. + */ +int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no); + +/** Register a dispatch function for a opcode/subcode. The driver will call + * this dispatch function when it receives a packet with the given + * opcode/subcode in its output queues along with the user specified + * argument. + * @param oct - the octeon device to register with. 
+ * @param  opcode     - the opcode for which the dispatch will be registered.
+ * @param  subcode    - the subcode for which the dispatch will be registered
+ * @param  fn         - the dispatch function.
+ * @param  fn_arg     - user-specified argument that will be passed along
+ *                      to the dispatch function by the driver.
+ * @return Success: 0; Failure: 1
+ */
+int octeon_register_dispatch_fn(struct octeon_device *oct,
+				u16 opcode,
+				u16 subcode,
+				octeon_dispatch_fn_t fn, void *fn_arg);
+
+/** Remove registration for an opcode/subcode. This will delete the mapping
+ * for an opcode/subcode. The dispatch function will be unregistered and will
+ * no longer be called if a packet with the opcode/subcode arrives in the
+ * driver output queues.
+ * @param  oct        - the octeon device to unregister from.
+ * @param  opcode     - the opcode to be unregistered.
+ * @param  subcode    - the subcode to be unregistered.
+ *
+ * @return Success: 0; Failure: 1
+ */
+int octeon_unregister_dispatch_fn(struct octeon_device *oct,
+				  u16 opcode,
+				  u16 subcode);
+
+void octeon_droq_print_stats(void);
+
+u32 octeon_droq_check_hw_for_pkts(struct octeon_device *oct,
+				  struct octeon_droq *droq);
+
+int octeon_create_droq(struct octeon_device *oct, u32 q_no,
+		       u32 num_descs, u32 desc_size, void *app_ctx);
+
+int octeon_droq_process_packets(struct octeon_device *oct,
+				struct octeon_droq *droq,
+				u32 budget);
+
+int octeon_process_droq_poll_cmd(struct octeon_device *oct, u32 q_no,
+				 int cmd, u32 arg);
+
+#endif	/* __OCTEON_DROQ_H__ */
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
new file mode 100644
index 000000000000..592fe49b589d
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
@@ -0,0 +1,319 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ *          Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2015 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium, Inc. for more information
+ **********************************************************************/
+
+/*!  \file octeon_iq.h
+ *   \brief Host Driver: Implementation of Octeon input queues. "Input" is
+ *   with respect to the Octeon device on the NIC. From this driver's
+ *   point of view they are egress queues.
+ */
+
+#ifndef __OCTEON_IQ_H__
+#define __OCTEON_IQ_H__
+
+#define IQ_STATUS_RUNNING   1
+
+#define IQ_SEND_OK          0
+#define IQ_SEND_STOP        1
+#define IQ_SEND_FAILED     -1
+
+/*-------------------------  INSTRUCTION QUEUE --------------------------*/
+
+/* \cond */
+
+#define REQTYPE_NONE                 0
+#define REQTYPE_NORESP_NET           1
+#define REQTYPE_NORESP_NET_SG        2
+#define REQTYPE_RESP_NET             3
+#define REQTYPE_RESP_NET_SG          4
+#define REQTYPE_SOFT_COMMAND         5
+#define REQTYPE_LAST                 5
+
+struct octeon_request_list {
+	u32 reqtype;
+	void *buf;
+};
+
+/* \endcond */
+
+/** Input Queue statistics.
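+ * A queue's stats block can be reached from the device structure, e.g.:
+ *
+ *	struct oct_iq_stats *stats = &oct->instr_queue[iq_no]->stats;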
+ */
+struct oct_iq_stats {
+	u64 instr_posted; /**< Instructions posted to this queue. */
+	u64 instr_processed; /**< Instructions processed in this queue. */
+	u64 instr_dropped; /**< Instructions that could not be processed. */
+	u64 bytes_sent; /**< Bytes sent through this queue. */
+	u64 sgentry_sent; /**< Gather entries sent through this queue. */
+	u64 tx_done; /**< Number of packets sent to the network. */
+	u64 tx_iq_busy; /**< Number of times this iq was found to be full. */
+	u64 tx_dropped; /**< Number of pkts dropped due to xmit path errors. */
+	u64 tx_tot_bytes; /**< Total count of bytes sent to the network. */
+};
+
+#define OCT_IQ_STATS_SIZE   (sizeof(struct oct_iq_stats))
+
+/** The instruction (input) queue.
+ * The input queue is used to post raw (instruction) mode data or packet
+ * data to the Octeon device from the host. Each input queue (up to 4) for
+ * an Octeon device has one such structure to represent it.
+ */
+struct octeon_instr_queue {
+	/** A spinlock to protect access to the input ring. */
+	spinlock_t lock;
+
+	/** Flag that indicates if the queue uses 64 byte commands. */
+	u32 iqcmd_64B:1;
+
+	/** Queue Number. */
+	u32 iq_no:5;
+
+	u32 rsvd:17;
+
+	/* Controls the periodic flushing of iq */
+	u32 do_auto_flush:1;
+
+	u32 status:8;
+
+	/** Maximum no. of instructions in this queue. */
+	u32 max_count;
+
+	/** Index in input ring where the driver should write the next packet */
+	u32 host_write_index;
+
+	/** Index in input ring where Octeon is expected to read the next
+	 * packet.
+	 */
+	u32 octeon_read_index;
+
+	/** This index aids in finding the window in the queue where Octeon
+	 * has read the commands.
+	 */
+	u32 flush_index;
+
+	/** This field keeps track of the instructions pending in this queue. */
+	atomic_t instr_pending;
+
+	u32 reset_instr_cnt;
+
+	/** Pointer to the Virtual Base addr of the input ring. */
+	u8 *base_addr;
+
+	struct octeon_request_list *request_list;
+
+	/** Octeon doorbell register for the ring. */
+	void __iomem *doorbell_reg;
+
+	/** Octeon instruction count register for this ring. */
+	void __iomem *inst_cnt_reg;
+
+	/** Number of instructions pending to be posted to Octeon. */
+	u32 fill_cnt;
+
+	/** The max. number of instructions that can be held pending by the
+	 * driver.
+	 */
+	u32 fill_threshold;
+
+	/** The last time that the doorbell was rung. */
+	u64 last_db_time;
+
+	/** The doorbell timeout. If the doorbell was not rung for this time
+	 * and fill_cnt is non-zero, ring the doorbell again.
+	 */
+	u32 db_timeout;
+
+	/** Statistics for this input queue. */
+	struct oct_iq_stats stats;
+
+	/** DMA mapped base address of the input descriptor ring. */
+	u64 base_addr_dma;
+
+	/** Application context */
+	void *app_ctx;
+};
+
+/*----------------------  INSTRUCTION FORMAT ----------------------------*/
+
+/** 32-byte instruction format.
+ *  Format of instruction for a 32-byte mode input queue.
+ */
+struct octeon_instr_32B {
+	/** Pointer where the input data is available. */
+	u64 dptr;
+
+	/** Instruction Header. */
+	u64 ih;
+
+	/** Pointer where the response for a RAW mode packet will be written
+	 * by Octeon.
+	 */
+	u64 rptr;
+
+	/** Input Request Header. Additional info about the input. */
+	u64 irh;
+};
+
+#define OCT_32B_INSTR_SIZE     (sizeof(struct octeon_instr_32B))
+
+/** 64-byte instruction format.
+ *  Format of instruction for a 64-byte mode input queue.
+ */
+struct octeon_instr_64B {
+	/** Pointer where the input data is available. */
+	u64 dptr;
+
+	/** Instruction Header. */
+	u64 ih;
+
+	/** Input Request Header.
*/ + u64 irh; + + /** opcode/subcode specific parameters */ + u64 ossp[2]; + + /** Return Data Parameters */ + u64 rdp; + + /** Pointer where the response for a RAW mode packet will be written + * by Octeon. + */ + u64 rptr; + + u64 reserved; + +}; + +#define OCT_64B_INSTR_SIZE (sizeof(struct octeon_instr_64B)) + +/** The size of each buffer in soft command buffer pool + */ +#define SOFT_COMMAND_BUFFER_SIZE 1024 + +struct octeon_soft_command { + /** Soft command buffer info. */ + struct list_head node; + u64 dma_addr; + u32 size; + + /** Command and return status */ + struct octeon_instr_64B cmd; +#define COMPLETION_WORD_INIT 0xffffffffffffffffULL + u64 *status_word; + + /** Data buffer info */ + void *virtdptr; + u64 dmadptr; + u32 datasize; + + /** Return buffer info */ + void *virtrptr; + u64 dmarptr; + u32 rdatasize; + + /** Context buffer info */ + void *ctxptr; + u32 ctxsize; + + /** Time out and callback */ + size_t wait_time; + size_t timeout; + u32 iq_no; + void (*callback)(struct octeon_device *, u32, void *); + void *callback_arg; +}; + +/** Maximum number of buffers to allocate into soft command buffer pool + */ +#define MAX_SOFT_COMMAND_BUFFERS 16 + +/** Head of a soft command buffer pool. + */ +struct octeon_sc_buffer_pool { + /** List structure to add delete pending entries to */ + struct list_head head; + + /** A lock for this response list */ + spinlock_t lock; + + atomic_t alloc_buf_count; +}; + +int octeon_setup_sc_buffer_pool(struct octeon_device *oct); +int octeon_free_sc_buffer_pool(struct octeon_device *oct); +struct octeon_soft_command * + octeon_alloc_soft_command(struct octeon_device *oct, + u32 datasize, u32 rdatasize, + u32 ctxsize); +void octeon_free_soft_command(struct octeon_device *oct, + struct octeon_soft_command *sc); + +/** + * octeon_init_instr_queue() + * @param octeon_dev - pointer to the octeon device structure. + * @param iq_no - queue to be initialized (0 <= q_no <= 3). + * + * Called at driver init time for each input queue. iq_conf has the + * configuration parameters for the queue. + * + * @return Success: 0 Failure: 1 + */ +int octeon_init_instr_queue(struct octeon_device *octeon_dev, u32 iq_no, + u32 num_descs); + +/** + * octeon_delete_instr_queue() + * @param octeon_dev - pointer to the octeon device structure. + * @param iq_no - queue to be deleted (0 <= q_no <= 3). + * + * Called at driver unload time for each input queue. Deletes all + * allocated resources for the input queue. 
+ * + * @return Success: 0 Failure: 1 + */ +int octeon_delete_instr_queue(struct octeon_device *octeon_dev, u32 iq_no); + +int lio_wait_for_instr_fetch(struct octeon_device *oct); + +int +octeon_register_reqtype_free_fn(struct octeon_device *oct, int reqtype, + void (*fn)(void *)); + +int +lio_process_iq_request_list(struct octeon_device *oct, + struct octeon_instr_queue *iq); + +int octeon_send_command(struct octeon_device *oct, u32 iq_no, + u32 force_db, void *cmd, void *buf, + u32 datasize, u32 reqtype); + +void octeon_prepare_soft_command(struct octeon_device *oct, + struct octeon_soft_command *sc, + u8 opcode, u8 subcode, + u32 irh_ossp, u64 ossp0, + u64 ossp1); + +int octeon_send_soft_command(struct octeon_device *oct, + struct octeon_soft_command *sc); + +int octeon_setup_iq(struct octeon_device *oct, u32 iq_no, + u32 num_descs, void *app_ctx); + +#endif /* __OCTEON_IQ_H__ */ diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_main.h b/drivers/net/ethernet/cavium/liquidio/octeon_main.h new file mode 100644 index 000000000000..cbd081981180 --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/octeon_main.h @@ -0,0 +1,237 @@ +/********************************************************************** + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2015 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium, Inc. for more information + **********************************************************************/ + +/*! \file octeon_main.h + * \brief Host Driver: This file is included by all host driver source files + * to include common definitions. + */ + +#ifndef _OCTEON_MAIN_H_ +#define _OCTEON_MAIN_H_ + +#if BITS_PER_LONG == 32 +#define CVM_CAST64(v) ((long long)(v)) +#elif BITS_PER_LONG == 64 +#define CVM_CAST64(v) ((long long)(long)(v)) +#else +#error "Unknown system architecture" +#endif + +#define DRV_NAME "LiquidIO" + +/** + * \brief determines if a given console has debug enabled. + * @param console console to check + * @returns 1 = enabled. 
0 otherwise + */ +int octeon_console_debug_enabled(u32 console); + +/* BQL-related functions */ +void octeon_report_sent_bytes_to_bql(void *buf, int reqtype); +void octeon_update_tx_completion_counters(void *buf, int reqtype, + unsigned int *pkts_compl, + unsigned int *bytes_compl); +void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl, + unsigned int bytes_compl); + +/** Swap 8B blocks */ +static inline void octeon_swap_8B_data(u64 *data, u32 blocks) +{ + while (blocks) { + cpu_to_be64s(data); + blocks--; + data++; + } +} + +/** + * \brief unmaps a PCI BAR + * @param oct Pointer to Octeon device + * @param baridx bar index + */ +static inline void octeon_unmap_pci_barx(struct octeon_device *oct, int baridx) +{ + dev_dbg(&oct->pci_dev->dev, "Freeing PCI mapped regions for Bar%d\n", + baridx); + + if (oct->mmio[baridx].done) + iounmap(oct->mmio[baridx].hw_addr); + + if (oct->mmio[baridx].start) + pci_release_region(oct->pci_dev, baridx * 2); +} + +/** + * \brief maps a PCI BAR + * @param oct Pointer to Octeon device + * @param baridx bar index + * @param max_map_len maximum length of mapped memory + */ +static inline int octeon_map_pci_barx(struct octeon_device *oct, + int baridx, int max_map_len) +{ + u32 mapped_len = 0; + + if (pci_request_region(oct->pci_dev, baridx * 2, DRV_NAME)) { + dev_err(&oct->pci_dev->dev, "pci_request_region failed for bar %d\n", + baridx); + return 1; + } + + oct->mmio[baridx].start = pci_resource_start(oct->pci_dev, baridx * 2); + oct->mmio[baridx].len = pci_resource_len(oct->pci_dev, baridx * 2); + + mapped_len = oct->mmio[baridx].len; + if (!mapped_len) + return 1; + + if (max_map_len && (mapped_len > max_map_len)) + mapped_len = max_map_len; + + oct->mmio[baridx].hw_addr = + ioremap(oct->mmio[baridx].start, mapped_len); + oct->mmio[baridx].mapped_len = mapped_len; + + dev_dbg(&oct->pci_dev->dev, "BAR%d start: 0x%llx mapped %u of %u bytes\n", + baridx, oct->mmio[baridx].start, mapped_len, + oct->mmio[baridx].len); + + if (!oct->mmio[baridx].hw_addr) { + dev_err(&oct->pci_dev->dev, "error ioremap for bar %d\n", + baridx); + return 1; + } + oct->mmio[baridx].done = 1; + + return 0; +} + +static inline void * +cnnic_alloc_aligned_dma(struct pci_dev *pci_dev, + u32 size, + u32 *alloc_size, + size_t *orig_ptr, + size_t *dma_addr __attribute__((unused))) +{ + int retries = 0; + void *ptr = NULL; + +#define OCTEON_MAX_ALLOC_RETRIES 1 + do { + ptr = + (void *)__get_free_pages(GFP_KERNEL, + get_order(size)); + if ((unsigned long)ptr & 0x07) { + free_pages((unsigned long)ptr, get_order(size)); + ptr = NULL; + /* Increment the size required if the first + * attempt failed. 
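+			 * Asking for 7 extra bytes leaves room to round
+			 * the returned address up to the next 8-byte
+			 * boundary on the retry.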
+ */ + if (!retries) + size += 7; + } + retries++; + } while ((retries <= OCTEON_MAX_ALLOC_RETRIES) && !ptr); + + *alloc_size = size; + *orig_ptr = (unsigned long)ptr; + if ((unsigned long)ptr & 0x07) + ptr = (void *)(((unsigned long)ptr + 7) & ~(7UL)); + return ptr; +} + +#define cnnic_free_aligned_dma(pci_dev, ptr, size, orig_ptr, dma_addr) \ + free_pages(orig_ptr, get_order(size)) + +static inline void +sleep_cond(wait_queue_head_t *wait_queue, int *condition) +{ + wait_queue_t we; + + init_waitqueue_entry(&we, current); + add_wait_queue(wait_queue, &we); + while (!(ACCESS_ONCE(*condition))) { + set_current_state(TASK_INTERRUPTIBLE); + if (signal_pending(current)) + goto out; + schedule(); + } +out: + set_current_state(TASK_RUNNING); + remove_wait_queue(wait_queue, &we); +} + +static inline void +sleep_atomic_cond(wait_queue_head_t *waitq, atomic_t *pcond) +{ + wait_queue_t we; + + init_waitqueue_entry(&we, current); + add_wait_queue(waitq, &we); + while (!atomic_read(pcond)) { + set_current_state(TASK_INTERRUPTIBLE); + if (signal_pending(current)) + goto out; + schedule(); + } +out: + set_current_state(TASK_RUNNING); + remove_wait_queue(waitq, &we); +} + +/* Gives up the CPU for a timeout period. + * Check that the condition is not true before we go to sleep for a + * timeout period. + */ +static inline void +sleep_timeout_cond(wait_queue_head_t *wait_queue, + int *condition, + int timeout) +{ + wait_queue_t we; + + init_waitqueue_entry(&we, current); + add_wait_queue(wait_queue, &we); + set_current_state(TASK_INTERRUPTIBLE); + if (!(*condition)) + schedule_timeout(timeout); + set_current_state(TASK_RUNNING); + remove_wait_queue(wait_queue, &we); +} + +#ifndef ROUNDUP4 +#define ROUNDUP4(val) (((val) + 3) & 0xfffffffc) +#endif + +#ifndef ROUNDUP8 +#define ROUNDUP8(val) (((val) + 7) & 0xfffffff8) +#endif + +#ifndef ROUNDUP16 +#define ROUNDUP16(val) (((val) + 15) & 0xfffffff0) +#endif + +#ifndef ROUNDUP128 +#define ROUNDUP128(val) (((val) + 127) & 0xffffff80) +#endif + +#endif /* _OCTEON_MAIN_H_ */ diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c new file mode 100644 index 000000000000..9c0fd6100405 --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c @@ -0,0 +1,199 @@ +/********************************************************************** + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2015 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium, Inc. 
for more information + **********************************************************************/ +#include <linux/version.h> +#include <linux/types.h> +#include <linux/list.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/kthread.h> +#include <linux/netdevice.h> +#include "octeon_config.h" +#include "liquidio_common.h" +#include "octeon_droq.h" +#include "octeon_iq.h" +#include "response_manager.h" +#include "octeon_device.h" +#include "octeon_nic.h" +#include "octeon_main.h" +#include "octeon_network.h" +#include "cn66xx_regs.h" +#include "cn66xx_device.h" +#include "cn68xx_regs.h" +#include "cn68xx_device.h" +#include "liquidio_image.h" +#include "octeon_mem_ops.h" + +#define MEMOPS_IDX MAX_BAR1_MAP_INDEX + +static inline void +octeon_toggle_bar1_swapmode(struct octeon_device *oct __attribute__((unused)), + u32 idx __attribute__((unused))) +{ +#ifdef __BIG_ENDIAN_BITFIELD + u32 mask; + + mask = oct->fn_list.bar1_idx_read(oct, idx); + mask = (mask & 0x2) ? (mask & ~2) : (mask | 2); + oct->fn_list.bar1_idx_write(oct, idx, mask); +#endif +} + +static void +octeon_pci_fastwrite(struct octeon_device *oct, u8 __iomem *mapped_addr, + u8 *hostbuf, u32 len) +{ + while ((len) && ((unsigned long)mapped_addr) & 7) { + writeb(*(hostbuf++), mapped_addr++); + len--; + } + + octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX); + + while (len >= 8) { + writeq(*((u64 *)hostbuf), mapped_addr); + mapped_addr += 8; + hostbuf += 8; + len -= 8; + } + + octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX); + + while (len--) + writeb(*(hostbuf++), mapped_addr++); +} + +static void +octeon_pci_fastread(struct octeon_device *oct, u8 __iomem *mapped_addr, + u8 *hostbuf, u32 len) +{ + while ((len) && ((unsigned long)mapped_addr) & 7) { + *(hostbuf++) = readb(mapped_addr++); + len--; + } + + octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX); + + while (len >= 8) { + *((u64 *)hostbuf) = readq(mapped_addr); + mapped_addr += 8; + hostbuf += 8; + len -= 8; + } + + octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX); + + while (len--) + *(hostbuf++) = readb(mapped_addr++); +} + +/* Core mem read/write with temporary bar1 settings. */ +/* op = 1 to read, op = 0 to write. */ +static void +__octeon_pci_rw_core_mem(struct octeon_device *oct, u64 addr, + u8 *hostbuf, u32 len, u32 op) +{ + u32 copy_len = 0, index_reg_val = 0; + unsigned long flags; + u8 __iomem *mapped_addr; + + spin_lock_irqsave(&oct->mem_access_lock, flags); + + /* Save the original index reg value. */ + index_reg_val = oct->fn_list.bar1_idx_read(oct, MEMOPS_IDX); + do { + oct->fn_list.bar1_idx_setup(oct, addr, MEMOPS_IDX, 1); + mapped_addr = oct->mmio[1].hw_addr + + (MEMOPS_IDX << 22) + (addr & 0x3fffff); + + /* If operation crosses a 4MB boundary, split the transfer + * at the 4MB + * boundary. 
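+		 * Example (illustrative): a transfer of 0x10000 bytes
+		 * starting at core address 0x3ff000 is done as 0x1000
+		 * bytes up to the 4MB line, after which BAR1 is
+		 * re-pointed and the remaining 0xf000 bytes are copied.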
+ */ + if (((addr + len - 1) & ~(0x3fffff)) != (addr & ~(0x3fffff))) { + copy_len = (u32)(((addr & ~(0x3fffff)) + + (MEMOPS_IDX << 22)) - addr); + } else { + copy_len = len; + } + + if (op) { /* read from core */ + octeon_pci_fastread(oct, mapped_addr, hostbuf, + copy_len); + } else { + octeon_pci_fastwrite(oct, mapped_addr, hostbuf, + copy_len); + } + + len -= copy_len; + addr += copy_len; + hostbuf += copy_len; + + } while (len); + + oct->fn_list.bar1_idx_write(oct, MEMOPS_IDX, index_reg_val); + + spin_unlock_irqrestore(&oct->mem_access_lock, flags); +} + +void +octeon_pci_read_core_mem(struct octeon_device *oct, + u64 coreaddr, + u8 *buf, + u32 len) +{ + __octeon_pci_rw_core_mem(oct, coreaddr, buf, len, 1); +} + +void +octeon_pci_write_core_mem(struct octeon_device *oct, + u64 coreaddr, + u8 *buf, + u32 len) +{ + __octeon_pci_rw_core_mem(oct, coreaddr, buf, len, 0); +} + +u64 octeon_read_device_mem64(struct octeon_device *oct, u64 coreaddr) +{ + u64 ret; + + __octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)&ret, 8, 1); + + return be64_to_cpu(ret); +} + +u32 octeon_read_device_mem32(struct octeon_device *oct, u64 coreaddr) +{ + u32 ret; + + __octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)&ret, 4, 1); + + return be32_to_cpu(ret); +} + +void octeon_write_device_mem32(struct octeon_device *oct, u64 coreaddr, + u32 val) +{ + u32 t = cpu_to_be32(val); + + __octeon_pci_rw_core_mem(oct, coreaddr, (u8 *)&t, 4, 0); +} diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.h b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.h new file mode 100644 index 000000000000..11b183377b44 --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.h @@ -0,0 +1,75 @@ +/********************************************************************** + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2015 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium, Inc. for more information + **********************************************************************/ + +/*! \file octeon_mem_ops.h + * \brief Host Driver: Routines used to read/write Octeon memory. + */ + +#ifndef __OCTEON_MEM_OPS_H__ +#define __OCTEON_MEM_OPS_H__ + +/** Read a 64-bit value from a BAR1 mapped core memory address. + * @param oct - pointer to the octeon device. + * @param core_addr - the address to read from. + * + * The range_idx gives the BAR1 index register for the range of address + * in which core_addr is mapped. + * + * @return 64-bit value read from Core memory + */ +u64 octeon_read_device_mem64(struct octeon_device *oct, u64 core_addr); + +/** Read a 32-bit value from a BAR1 mapped core memory address. + * @param oct - pointer to the octeon device. + * @param core_addr - the address to read from. + * + * @return 32-bit value read from Core memory + */ +u32 octeon_read_device_mem32(struct octeon_device *oct, u64 core_addr); + +/** Write a 32-bit value to a BAR1 mapped core memory address. 
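+ *
+ * The value is byte-swapped to Octeon (big-endian) order before it is
+ * written; see octeon_write_device_mem32() in octeon_mem_ops.c.
+ *
+ * Illustrative call (the address here is hypothetical):
+ *
+ *   octeon_write_device_mem32(oct, 0x20000000ULL, 0xdeadbeef);
+ *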
+ * @param oct - pointer to the octeon device.
+ * @param core_addr - the address to write to.
+ * @param val - 32-bit value to write.
+ */
+void
+octeon_write_device_mem32(struct octeon_device *oct,
+			  u64 core_addr,
+			  u32 val);
+
+/** Read multiple bytes from Octeon memory.
+ */
+void
+octeon_pci_read_core_mem(struct octeon_device *oct,
+			 u64 coreaddr,
+			 u8 *buf,
+			 u32 len);
+
+/** Write multiple bytes into Octeon memory.
+ */
+void
+octeon_pci_write_core_mem(struct octeon_device *oct,
+			  u64 coreaddr,
+			  u8 *buf,
+			  u32 len);
+
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
new file mode 100644
index 000000000000..b3abe5818fd3
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
@@ -0,0 +1,224 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2015 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium, Inc. for more information
+ **********************************************************************/
+
+/*! \file octeon_network.h
+ * \brief Host NIC Driver: Structure and Macro definitions used by NIC Module.
+ */
+
+#ifndef __OCTEON_NETWORK_H__
+#define __OCTEON_NETWORK_H__
+#include <linux/version.h>
+#include <linux/dma-mapping.h>
+#include <linux/ptp_clock_kernel.h>
+
+/** LiquidIO per-interface network private data */
+struct lio {
+	/** State of the interface. Rx/Tx happens only in the RUNNING state. */
+	atomic_t ifstate;
+
+	/** Octeon Interface index number. This device will be represented as
+	 * oct<ifidx> in the system.
+	 */
+	int ifidx;
+
+	/** Octeon Input queue to use to transmit for this network interface. */
+	int txq;
+
+	/** Octeon Output queue from which pkts arrive
+	 * for this network interface.
+	 */
+	int rxq;
+
+	/** Guards the glist */
+	spinlock_t lock;
+
+	/** Linked list of gather components */
+	struct list_head glist;
+
+	/** Pointer to the NIC properties for the Octeon device this network
+	 * interface is associated with.
+	 */
+	struct octdev_props *octprops;
+
+	/** Pointer to the octeon device structure. */
+	struct octeon_device *oct_dev;
+
+	struct net_device *netdev;
+
+	/** Link information sent by the core application for this interface. */
+	struct oct_link_info linfo;
+
+	/** Size of Tx queue for this octeon device. */
+	u32 tx_qsize;
+
+	/** Size of Rx queue for this octeon device. */
+	u32 rx_qsize;
+
+	/** Size of MTU for this octeon device. */
+	u32 mtu;
+
+	/** msg level flag per interface. */
+	u32 msg_enable;
+
+	/** Copy of Interface capabilities: TSO, TSO6, LRO, Checksums. */
+	u64 dev_capability;
+
+	/** Copy of beacon reg in phy */
+	u32 phy_beacon_val;
+
+	/** Copy of ctrl reg in phy */
+	u32 led_ctrl_val;
+
+	/* PTP clock information */
+	struct ptp_clock_info ptp_info;
+	struct ptp_clock *ptp_clock;
+	s64 ptp_adjust;
+
+	/* for atomic access to Octeon PTP reg and data struct */
+	spinlock_t ptp_lock;
+
+	/* Interface info */
+	u32 intf_open;
+
+	/* work queue for txq status */
+	struct cavium_wq txq_status_wq;
+
+};
+
+#define LIO_SIZE (sizeof(struct lio))
+#define GET_LIO(netdev)  ((struct lio *)netdev_priv(netdev))
+
+/**
+ * \brief Enable or disable feature
+ * @param netdev  pointer to network device
+ * @param cmd     Command that just requires acknowledgment
+ */
+int liquidio_set_feature(struct net_device *netdev, int cmd);
+
+/**
+ * \brief Link control command completion callback
+ * @param nctrl_ptr pointer to control packet structure
+ *
+ * This routine is called by the callback function when a ctrl pkt sent to
+ * core app completes. The nctrl_ptr contains a copy of the command type
+ * and data sent to the core app. This routine is only called if the ctrl
+ * pkt was sent successfully to the core app.
+ */
+void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr);
+
+/**
+ * \brief Register ethtool operations
+ * @param netdev  pointer to network device
+ */
+void liquidio_set_ethtool_ops(struct net_device *netdev);
+
+static inline void
+*recv_buffer_alloc(struct octeon_device *oct __attribute__((unused)),
+		   u32 q_no __attribute__((unused)), u32 size)
+{
+#define SKB_ADJ_MASK  0x3F
+#define SKB_ADJ       (SKB_ADJ_MASK + 1)
+
+	struct sk_buff *skb = dev_alloc_skb(size + SKB_ADJ);
+
+	/* dev_alloc_skb() can fail under memory pressure; don't
+	 * dereference a NULL skb below.
+	 */
+	if (unlikely(!skb))
+		return NULL;
+
+	if ((unsigned long)skb->data & SKB_ADJ_MASK) {
+		u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);
+
+		skb_reserve(skb, r);
+	}
+
+	return (void *)skb;
+}
+
+static inline void recv_buffer_free(void *buffer)
+{
+	dev_kfree_skb_any((struct sk_buff *)buffer);
+}
+
+#define lio_dma_alloc(oct, size, dma_addr) \
+	dma_alloc_coherent(&oct->pci_dev->dev, size, dma_addr, GFP_KERNEL)
+#define lio_dma_free(oct, size, virt_addr, dma_addr) \
+	dma_free_coherent(&oct->pci_dev->dev, size, virt_addr, dma_addr)
+
+#define get_rbd(ptr)      (((struct sk_buff *)(ptr))->data)
+
+static inline u64
+lio_map_ring_info(struct octeon_droq *droq, u32 i)
+{
+	dma_addr_t dma_addr;
+	struct octeon_device *oct = droq->oct_dev;
+
+	dma_addr = dma_map_single(&oct->pci_dev->dev, &droq->info_list[i],
+				  OCT_DROQ_INFO_SIZE, DMA_FROM_DEVICE);
+
+	BUG_ON(dma_mapping_error(&oct->pci_dev->dev, dma_addr));
+
+	return (u64)dma_addr;
+}
+
+static inline void
+lio_unmap_ring_info(struct pci_dev *pci_dev,
+		    u64 info_ptr, u32 size)
+{
+	dma_unmap_single(&pci_dev->dev, info_ptr, size, DMA_FROM_DEVICE);
+}
+
+static inline u64
+lio_map_ring(struct pci_dev *pci_dev,
+	     void *buf, u32 size)
+{
+	dma_addr_t dma_addr;
+
+	dma_addr = dma_map_single(&pci_dev->dev, get_rbd(buf), size,
+				  DMA_FROM_DEVICE);
+
+	BUG_ON(dma_mapping_error(&pci_dev->dev, dma_addr));
+
+	return (u64)dma_addr;
+}
+
+static inline void
+lio_unmap_ring(struct pci_dev *pci_dev,
+	       u64 buf_ptr, u32 size)
+{
+	dma_unmap_single(&pci_dev->dev,
+			 buf_ptr, size,
+			 DMA_FROM_DEVICE);
+}
+
+static inline void *octeon_fast_packet_alloc(struct octeon_device *oct,
+					     struct octeon_droq *droq,
+					     u32 q_no, u32 size)
+{
+	return recv_buffer_alloc(oct, q_no, size);
+}
+
+static inline void octeon_fast_packet_next(struct octeon_droq *droq,
+					   struct sk_buff *nicbuf,
+					   int copy_len,
+					   int idx)
+{
+	memcpy(skb_put(nicbuf, copy_len),
+
get_rbd(droq->recv_buf_list[idx].buffer), copy_len); +} + +#endif diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c new file mode 100644 index 000000000000..1a0191549cb3 --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c @@ -0,0 +1,189 @@ +/********************************************************************** + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2015 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium, Inc. for more information + **********************************************************************/ +#include <linux/version.h> +#include <linux/types.h> +#include <linux/list.h> +#include <linux/interrupt.h> +#include <linux/pci.h> +#include <linux/kthread.h> +#include <linux/netdevice.h> +#include "octeon_config.h" +#include "liquidio_common.h" +#include "octeon_droq.h" +#include "octeon_iq.h" +#include "response_manager.h" +#include "octeon_device.h" +#include "octeon_nic.h" +#include "octeon_main.h" +#include "octeon_network.h" +#include "cn66xx_regs.h" +#include "cn66xx_device.h" +#include "cn68xx_regs.h" +#include "cn68xx_device.h" +#include "liquidio_image.h" +#include "octeon_mem_ops.h" + +void * +octeon_alloc_soft_command_resp(struct octeon_device *oct, + struct octeon_instr_64B *cmd, + size_t rdatasize) +{ + struct octeon_soft_command *sc; + struct octeon_instr_ih *ih; + struct octeon_instr_irh *irh; + struct octeon_instr_rdp *rdp; + + sc = (struct octeon_soft_command *) + octeon_alloc_soft_command(oct, 0, rdatasize, 0); + + if (!sc) + return NULL; + + /* Copy existing command structure into the soft command */ + memcpy(&sc->cmd, cmd, sizeof(struct octeon_instr_64B)); + + /* Add in the response related fields. Opcode and Param are already + * there. + */ + ih = (struct octeon_instr_ih *)&sc->cmd.ih; + ih->fsz = 40; /* irh + ossp[0] + ossp[1] + rdp + rptr = 40 bytes */ + + irh = (struct octeon_instr_irh *)&sc->cmd.irh; + irh->rflag = 1; /* a response is required */ + irh->len = 4; /* means four 64-bit words immediately follow irh */ + + rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp; + rdp->pcie_port = oct->pcie_port; + rdp->rlen = rdatasize; + + *sc->status_word = COMPLETION_WORD_INIT; + + sc->wait_time = 1000; + sc->timeout = jiffies + sc->wait_time; + + return sc; +} + +int octnet_send_nic_data_pkt(struct octeon_device *oct, + struct octnic_data_pkt *ndata, + u32 xmit_more) +{ + int ring_doorbell; + + ring_doorbell = !xmit_more; + + return octeon_send_command(oct, ndata->q_no, ring_doorbell, &ndata->cmd, + ndata->buf, ndata->datasize, + ndata->reqtype); +} + +static void octnet_link_ctrl_callback(struct octeon_device *oct, + u32 status, + void *sc_ptr) +{ + struct octeon_soft_command *sc = (struct octeon_soft_command *)sc_ptr; + struct octnic_ctrl_pkt *nctrl; + + nctrl = (struct octnic_ctrl_pkt *)sc->ctxptr; + + /* Call the callback function if status is OK. 
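+	 * (The soft command itself is freed below whether or not the
+	 * callback runs.)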
+	 * Status is OK only if a response was expected and core returned
+	 * success.
+	 * If no response was expected, status is OK if the command was posted
+	 * successfully.
+	 */
+	if (!status && nctrl->cb_fn)
+		nctrl->cb_fn(nctrl);
+
+	octeon_free_soft_command(oct, sc);
+}
+
+static inline struct octeon_soft_command
+*octnic_alloc_ctrl_pkt_sc(struct octeon_device *oct,
+			  struct octnic_ctrl_pkt *nctrl,
+			  struct octnic_ctrl_params nparams)
+{
+	struct octeon_soft_command *sc = NULL;
+	u8 *data;
+	size_t rdatasize;
+	u32 uddsize = 0, datasize = 0;
+
+	uddsize = (u32)(nctrl->ncmd.s.more * 8);
+
+	datasize = OCTNET_CMD_SIZE + uddsize;
+	rdatasize = (nctrl->wait_time) ? 16 : 0;
+
+	sc = (struct octeon_soft_command *)
+		octeon_alloc_soft_command(oct, datasize, rdatasize,
+					  sizeof(struct octnic_ctrl_pkt));
+
+	if (!sc)
+		return NULL;
+
+	memcpy(sc->ctxptr, nctrl, sizeof(struct octnic_ctrl_pkt));
+
+	data = (u8 *)sc->virtdptr;
+
+	memcpy(data, &nctrl->ncmd, OCTNET_CMD_SIZE);
+
+	octeon_swap_8B_data((u64 *)data, (OCTNET_CMD_SIZE >> 3));
+
+	if (uddsize) {
+		/* Endian-Swap for UDD should have been done by caller. */
+		memcpy(data + OCTNET_CMD_SIZE, nctrl->udd, uddsize);
+	}
+
+	octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_CMD,
+				    0, 0, 0);
+
+	sc->callback = octnet_link_ctrl_callback;
+	sc->callback_arg = sc;
+	sc->wait_time = nctrl->wait_time;
+
+	return sc;
+}
+
+int
+octnet_send_nic_ctrl_pkt(struct octeon_device *oct,
+			 struct octnic_ctrl_pkt *nctrl,
+			 struct octnic_ctrl_params nparams)
+{
+	int retval;
+	struct octeon_soft_command *sc = NULL;
+
+	sc = octnic_alloc_ctrl_pkt_sc(oct, nctrl, nparams);
+	if (!sc) {
+		dev_err(&oct->pci_dev->dev, "%s soft command alloc failed\n",
+			__func__);
+		return -1;
+	}
+
+	retval = octeon_send_soft_command(oct, sc);
+	if (retval) {
+		octeon_free_soft_command(oct, sc);
+		dev_err(&oct->pci_dev->dev, "%s soft command send failed status: %x\n",
+			__func__, retval);
+		return -1;
+	}
+
+	return retval;
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
new file mode 100644
index 000000000000..0238857c8105
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
@@ -0,0 +1,227 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2015 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium, Inc. for more information
+ **********************************************************************/
+
+/*! \file octeon_nic.h
+ * \brief Host NIC Driver: Routine to send network data &
+ * control packet to Octeon.
+ */
+
+#ifndef __OCTEON_NIC_H__
+#define  __OCTEON_NIC_H__
+
+/* Maximum number of 8-byte words that can be sent in a NIC control message.
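+ * With MAX_NCTRL_UDD = 32, the udd[] scratch area below is 256 bytes.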
+ */ +#define MAX_NCTRL_UDD 32 + +typedef void (*octnic_ctrl_pkt_cb_fn_t) (void *); + +/* Structure of control information passed by the NIC module to the OSI + * layer when sending control commands to Octeon device software. + */ +struct octnic_ctrl_pkt { + /** Command to be passed to the Octeon device software. */ + union octnet_cmd ncmd; + + /** Send buffer */ + void *data; + u64 dmadata; + + /** Response buffer */ + void *rdata; + u64 dmardata; + + /** Additional data that may be needed by some commands. */ + u64 udd[MAX_NCTRL_UDD]; + + /** Time to wait for Octeon software to respond to this control command. + * If wait_time is 0, OSI assumes no response is expected. + */ + size_t wait_time; + + /** The network device that issued the control command. */ + u64 netpndev; + + /** Callback function called when the command has been fetched */ + octnic_ctrl_pkt_cb_fn_t cb_fn; +}; + +#define MAX_UDD_SIZE(nctrl) (sizeof(nctrl->udd)) + +/** Structure of data information passed by the NIC module to the OSI + * layer when forwarding data to Octeon device software. + */ +struct octnic_data_pkt { + /** Pointer to information maintained by NIC module for this packet. The + * OSI layer passes this as-is to the driver. + */ + void *buf; + + /** Type of buffer passed in "buf" above. */ + u32 reqtype; + + /** Total data bytes to be transferred in this command. */ + u32 datasize; + + /** Command to be passed to the Octeon device software. */ + struct octeon_instr_64B cmd; + + /** Input queue to use to send this command. */ + u32 q_no; + +}; + +/** Structure passed by NIC module to OSI layer to prepare a command to send + * network data to Octeon. + */ +union octnic_cmd_setup { + struct { + u32 ifidx:8; + u32 cksum_offset:7; + u32 gather:1; + u32 timestamp:1; + u32 ipv4opts_ipv6exthdr:2; + u32 ip_csum:1; + u32 tnl_csum:1; + + u32 rsvd:11; + union { + u32 datasize; + u32 gatherptrs; + } u; + } s; + + u64 u64; + +}; + +struct octnic_ctrl_params { + u32 resp_order; +}; + +static inline int octnet_iq_is_full(struct octeon_device *oct, u32 q_no) +{ + return ((u32)atomic_read(&oct->instr_queue[q_no]->instr_pending) + >= (oct->instr_queue[q_no]->max_count - 2)); +} + +/** Utility function to prepare a 64B NIC instruction based on a setup command + * @param cmd - pointer to instruction to be filled in. + * @param setup - pointer to the setup structure + * @param q_no - which queue for back pressure + * + * Assumes the cmd instruction is pre-allocated, but no fields are filled in. 
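+ *
+ * (The third parameter is the instruction tag; the header above names it
+ * q_no, but the implementation uses it as a tag and derives a default
+ * from setup->s.ifidx when it is zero.)
+ *
+ * Illustrative usage for a linear (non-gather) transmit; the variable
+ * names here are hypothetical:
+ *
+ *   union octnic_cmd_setup setup;
+ *
+ *   memset(&setup, 0, sizeof(setup));
+ *   setup.s.ifidx = lio->ifidx;
+ *   setup.s.u.datasize = skb->len;
+ *   octnet_prepare_pci_cmd(&ndata.cmd, &setup, 0);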
+ */
+static inline void
+octnet_prepare_pci_cmd(struct octeon_instr_64B *cmd,
+		       union octnic_cmd_setup *setup, u32 tag)
+{
+	struct octeon_instr_ih *ih;
+	struct octeon_instr_irh *irh;
+	union octnic_packet_params packet_params;
+
+	memset(cmd, 0, sizeof(struct octeon_instr_64B));
+
+	ih = (struct octeon_instr_ih *)&cmd->ih;
+
+	/* assume that rflag is cleared so therefore front data will only have
+	 * irh and ossp[1] and ossp[2] for a total of 24 bytes
+	 */
+	ih->fsz = 24;
+
+	ih->tagtype = ORDERED_TAG;
+	ih->grp = DEFAULT_POW_GRP;
+
+	if (tag)
+		ih->tag = tag;
+	else
+		ih->tag = LIO_DATA(setup->s.ifidx);
+
+	ih->raw = 1;
+	ih->qos = (setup->s.ifidx & 3) + 4;	/* map qos based on interface */
+
+	if (!setup->s.gather) {
+		ih->dlengsz = setup->s.u.datasize;
+	} else {
+		ih->gather = 1;
+		ih->dlengsz = setup->s.u.gatherptrs;
+	}
+
+	irh = (struct octeon_instr_irh *)&cmd->irh;
+
+	irh->opcode = OPCODE_NIC;
+	irh->subcode = OPCODE_NIC_NW_DATA;
+
+	packet_params.u32 = 0;
+
+	if (setup->s.cksum_offset) {
+		packet_params.s.csoffset = setup->s.cksum_offset;
+		packet_params.s.ipv4opts_ipv6exthdr =
+			setup->s.ipv4opts_ipv6exthdr;
+	}
+
+	packet_params.s.ip_csum = setup->s.ip_csum;
+	packet_params.s.tnl_csum = setup->s.tnl_csum;
+	packet_params.s.ifidx = setup->s.ifidx;
+	packet_params.s.tsflag = setup->s.timestamp;
+
+	irh->ossp = packet_params.u32;
+}
+
+/** Allocate a soft command with space for a response immediately following
+ * the command.
+ * @param oct - octeon device pointer
+ * @param cmd - pointer to the command structure, pre-filled for everything
+ * except the response.
+ * @param rdatasize - size in bytes of the response.
+ *
+ * @returns pointer to allocated buffer with command copied into it, and
+ * response space immediately following.
+ */
+void *
+octeon_alloc_soft_command_resp(struct octeon_device *oct,
+			       struct octeon_instr_64B *cmd,
+			       size_t rdatasize);
+
+/** Send a NIC data packet to the device
+ * @param oct - octeon device pointer
+ * @param ndata - control structure with queueing, and buffer information
+ *
+ * @returns IQ_FAILED if it failed to add to the input queue. IQ_STOP if the
+ * queue should be stopped, and IQ_SEND_OK if it sent okay.
+ */
+int octnet_send_nic_data_pkt(struct octeon_device *oct,
+			     struct octnic_data_pkt *ndata, u32 xmit_more);
+
+/** Send a NIC control packet to the device
+ * @param oct - octeon device pointer
+ * @param nctrl - control structure with command, timeout, and callback info
+ * @param nparams - response control structure
+ *
+ * @returns IQ_FAILED if it failed to add to the input queue. IQ_STOP if the
+ * queue should be stopped, and IQ_SEND_OK if it sent okay.
+ */
+int
+octnet_send_nic_ctrl_pkt(struct octeon_device *oct,
+			 struct octnic_ctrl_pkt *nctrl,
+			 struct octnic_ctrl_params nparams);
+
+#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
new file mode 100644
index 000000000000..adb428463495
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -0,0 +1,764 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2015 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium, Inc. for more information
+ **********************************************************************/
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/kthread.h>
+#include <linux/netdevice.h>
+#include "octeon_config.h"
+#include "liquidio_common.h"
+#include "octeon_droq.h"
+#include "octeon_iq.h"
+#include "response_manager.h"
+#include "octeon_device.h"
+#include "octeon_nic.h"
+#include "octeon_main.h"
+#include "octeon_network.h"
+#include "cn66xx_regs.h"
+#include "cn66xx_device.h"
+#include "cn68xx_regs.h"
+#include "cn68xx_device.h"
+#include "liquidio_image.h"
+
+#define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count)  \
+	(octeon_dev_ptr->instr_queue[iq_no]->stats.field += count)
+
+struct iq_post_status {
+	int status;
+	int index;
+};
+
+static void check_db_timeout(struct work_struct *work);
+static void  __check_db_timeout(struct octeon_device *oct, unsigned long iq_no);
+
+static void (*reqtype_free_fn[MAX_OCTEON_DEVICES][REQTYPE_LAST + 1]) (void *);
+
+static inline int IQ_INSTR_MODE_64B(struct octeon_device *oct, int iq_no)
+{
+	struct octeon_instr_queue *iq =
+	    (struct octeon_instr_queue *)oct->instr_queue[iq_no];
+	return iq->iqcmd_64B;
+}
+
+#define IQ_INSTR_MODE_32B(oct, iq_no)  (!IQ_INSTR_MODE_64B(oct, iq_no))
+
+/* Define this to return the request status compatible to old code */
+/*#define OCTEON_USE_OLD_REQ_STATUS*/
+
+/* Return 0 on success, 1 on failure */
+int octeon_init_instr_queue(struct octeon_device *oct,
+			    u32 iq_no, u32 num_descs)
+{
+	struct octeon_instr_queue *iq;
+	struct octeon_iq_config *conf = NULL;
+	u32 q_size;
+	struct cavium_wq *db_wq;
+
+	if (OCTEON_CN6XXX(oct))
+		conf = &(CFG_GET_IQ_CFG(CHIP_FIELD(oct, cn6xxx, conf)));
+
+	if (!conf) {
+		dev_err(&oct->pci_dev->dev, "Unsupported Chip %x\n",
+			oct->chip_id);
+		return 1;
+	}
+
+	if (num_descs & (num_descs - 1)) {
+		dev_err(&oct->pci_dev->dev,
+			"Number of descriptors for instr queue %d not in power of 2.\n",
+			iq_no);
+		return 1;
+	}
+
+	q_size = (u32)conf->instr_type * num_descs;
+
+	iq = oct->instr_queue[iq_no];
+
+	iq->base_addr = lio_dma_alloc(oct, q_size,
+				      (dma_addr_t *)&iq->base_addr_dma);
+	if (!iq->base_addr) {
+		dev_err(&oct->pci_dev->dev, "Cannot allocate memory for instr queue %d\n",
+			iq_no);
+		return 1;
+	}
+
+	iq->max_count = num_descs;
+
+	/* Initialize a list to hold requests that have been posted to Octeon
+	 * but have yet to be fetched by octeon
+	 */
+	iq->request_list = vmalloc(sizeof(*iq->request_list) * num_descs);
+	if (!iq->request_list) {
+		lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
+		dev_err(&oct->pci_dev->dev, "Alloc failed for IQ[%d] nr free list\n",
+			iq_no);
+		return 1;
+	}
+
+	memset(iq->request_list, 0, sizeof(*iq->request_list) * num_descs);
+
+	dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %llx count: %d\n",
+		iq_no, iq->base_addr, iq->base_addr_dma, iq->max_count);
+
+	iq->iq_no = iq_no;
+	iq->fill_threshold = (u32)conf->db_min;
+	iq->fill_cnt = 0;
+	iq->host_write_index = 0;
+	iq->octeon_read_index = 0;
+	iq->flush_index = 0;
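+
+	/* Doorbell bookkeeping: __check_db_timeout() rings the doorbell
+	 * for batched commands once jiffies moves past
+	 * last_db_time + db_timeout.
+	 */
+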
iq->last_db_time = 0; + iq->do_auto_flush = 1; + iq->db_timeout = (u32)conf->db_timeout; + atomic_set(&iq->instr_pending, 0); + + /* Initialize the spinlock for this instruction queue */ + spin_lock_init(&iq->lock); + + oct->io_qmask.iq |= (1 << iq_no); + + /* Set the 32B/64B mode for each input queue */ + oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no); + iq->iqcmd_64B = (conf->instr_type == 64); + + oct->fn_list.setup_iq_regs(oct, iq_no); + + oct->check_db_wq[iq_no].wq = create_workqueue("check_iq_db"); + if (!oct->check_db_wq[iq_no].wq) { + lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma); + dev_err(&oct->pci_dev->dev, "check db wq create failed for iq %d\n", + iq_no); + return 1; + } + + db_wq = &oct->check_db_wq[iq_no]; + + INIT_DELAYED_WORK(&db_wq->wk.work, check_db_timeout); + db_wq->wk.ctxptr = oct; + db_wq->wk.ctxul = iq_no; + queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1)); + + return 0; +} + +int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no) +{ + u64 desc_size = 0, q_size; + struct octeon_instr_queue *iq = oct->instr_queue[iq_no]; + + cancel_delayed_work_sync(&oct->check_db_wq[iq_no].wk.work); + flush_workqueue(oct->check_db_wq[iq_no].wq); + destroy_workqueue(oct->check_db_wq[iq_no].wq); + + if (OCTEON_CN6XXX(oct)) + desc_size = + CFG_GET_IQ_INSTR_TYPE(CHIP_FIELD(oct, cn6xxx, conf)); + + if (iq->request_list) + vfree(iq->request_list); + + if (iq->base_addr) { + q_size = iq->max_count * desc_size; + lio_dma_free(oct, (u32)q_size, iq->base_addr, + iq->base_addr_dma); + return 0; + } + return 1; +} + +/* Return 0 on success, 1 on failure */ +int octeon_setup_iq(struct octeon_device *oct, + u32 iq_no, + u32 num_descs, + void *app_ctx) +{ + if (oct->instr_queue[iq_no]) { + dev_dbg(&oct->pci_dev->dev, "IQ is in use. Cannot create the IQ: %d again\n", + iq_no); + oct->instr_queue[iq_no]->app_ctx = app_ctx; + return 0; + } + oct->instr_queue[iq_no] = + vmalloc(sizeof(struct octeon_instr_queue)); + if (!oct->instr_queue[iq_no]) + return 1; + + memset(oct->instr_queue[iq_no], 0, + sizeof(struct octeon_instr_queue)); + + oct->instr_queue[iq_no]->app_ctx = app_ctx; + if (octeon_init_instr_queue(oct, iq_no, num_descs)) { + vfree(oct->instr_queue[iq_no]); + oct->instr_queue[iq_no] = NULL; + return 1; + } + + oct->num_iqs++; + oct->fn_list.enable_io_queues(oct); + return 0; +} + +int lio_wait_for_instr_fetch(struct octeon_device *oct) +{ + int i, retry = 1000, pending, instr_cnt = 0; + + do { + instr_cnt = 0; + + /*for (i = 0; i < oct->num_iqs; i++) {*/ + for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) { + if (!(oct->io_qmask.iq & (1UL << i))) + continue; + pending = + atomic_read(&oct-> + instr_queue[i]->instr_pending); + if (pending) + __check_db_timeout(oct, i); + instr_cnt += pending; + } + + if (instr_cnt == 0) + break; + + schedule_timeout_uninterruptible(1); + + } while (retry-- && instr_cnt); + + return instr_cnt; +} + +static inline void +ring_doorbell(struct octeon_device *oct, struct octeon_instr_queue *iq) +{ + if (atomic_read(&oct->status) == OCT_DEV_RUNNING) { + writel(iq->fill_cnt, iq->doorbell_reg); + /* make sure doorbell write goes through */ + mmiowb(); + iq->fill_cnt = 0; + iq->last_db_time = jiffies; + return; + } +} + +static inline void __copy_cmd_into_iq(struct octeon_instr_queue *iq, + u8 *cmd) +{ + u8 *iqptr, cmdsize; + + cmdsize = ((iq->iqcmd_64B) ? 
64 : 32); + iqptr = iq->base_addr + (cmdsize * iq->host_write_index); + + memcpy(iqptr, cmd, cmdsize); +} + +static inline int +__post_command(struct octeon_device *octeon_dev __attribute__((unused)), + struct octeon_instr_queue *iq, + u32 force_db __attribute__((unused)), u8 *cmd) +{ + u32 index = -1; + + /* This ensures that the read index does not wrap around to the same + * position if queue gets full before Octeon could fetch any instr. + */ + if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 1)) + return -1; + + __copy_cmd_into_iq(iq, cmd); + + /* "index" is returned, host_write_index is modified. */ + index = iq->host_write_index; + INCR_INDEX_BY1(iq->host_write_index, iq->max_count); + iq->fill_cnt++; + + /* Flush the command into memory. We need to be sure the data is in + * memory before indicating that the instruction is pending. + */ + wmb(); + + atomic_inc(&iq->instr_pending); + + return index; +} + +static inline struct iq_post_status +__post_command2(struct octeon_device *octeon_dev __attribute__((unused)), + struct octeon_instr_queue *iq, + u32 force_db __attribute__((unused)), u8 *cmd) +{ + struct iq_post_status st; + + st.status = IQ_SEND_OK; + + /* This ensures that the read index does not wrap around to the same + * position if queue gets full before Octeon could fetch any instr. + */ + if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 1)) { + st.status = IQ_SEND_FAILED; + st.index = -1; + return st; + } + + if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 2)) + st.status = IQ_SEND_STOP; + + __copy_cmd_into_iq(iq, cmd); + + /* "index" is returned, host_write_index is modified. */ + st.index = iq->host_write_index; + INCR_INDEX_BY1(iq->host_write_index, iq->max_count); + iq->fill_cnt++; + + /* Flush the command into memory. We need to be sure the data is in + * memory before indicating that the instruction is pending. + */ + wmb(); + + atomic_inc(&iq->instr_pending); + + return st; +} + +int +octeon_register_reqtype_free_fn(struct octeon_device *oct, int reqtype, + void (*fn)(void *)) +{ + if (reqtype > REQTYPE_LAST) { + dev_err(&oct->pci_dev->dev, "%s: Invalid reqtype: %d\n", + __func__, reqtype); + return -EINVAL; + } + + reqtype_free_fn[oct->octeon_id][reqtype] = fn; + + return 0; +} + +static inline void +__add_to_request_list(struct octeon_instr_queue *iq, + int idx, void *buf, int reqtype) +{ + iq->request_list[idx].buf = buf; + iq->request_list[idx].reqtype = reqtype; +} + +int +lio_process_iq_request_list(struct octeon_device *oct, + struct octeon_instr_queue *iq) +{ + int reqtype; + void *buf; + u32 old = iq->flush_index; + u32 inst_count = 0; + unsigned pkts_compl = 0, bytes_compl = 0; + struct octeon_soft_command *sc; + struct octeon_instr_irh *irh; + + while (old != iq->octeon_read_index) { + reqtype = iq->request_list[old].reqtype; + buf = iq->request_list[old].buf; + + if (reqtype == REQTYPE_NONE) + goto skip_this; + + octeon_update_tx_completion_counters(buf, reqtype, &pkts_compl, + &bytes_compl); + + switch (reqtype) { + case REQTYPE_NORESP_NET: + case REQTYPE_NORESP_NET_SG: + case REQTYPE_RESP_NET_SG: + reqtype_free_fn[oct->octeon_id][reqtype](buf); + break; + case REQTYPE_RESP_NET: + case REQTYPE_SOFT_COMMAND: + sc = buf; + + irh = (struct octeon_instr_irh *)&sc->cmd.irh; + if (irh->rflag) { + /* We're expecting a response from Octeon. + * It's up to lio_process_ordered_list() to + * process sc. Add sc to the ordered soft + * command response list because we expect + * a response from Octeon. 
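+				 * lio_process_ordered_list() will later
+				 * complete it once the device overwrites
+				 * the COMPLETION_WORD_INIT marker in the
+				 * response buffer.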
+ */ + spin_lock_bh(&oct->response_list + [OCTEON_ORDERED_SC_LIST].lock); + atomic_inc(&oct->response_list + [OCTEON_ORDERED_SC_LIST]. + pending_req_count); + list_add_tail(&sc->node, &oct->response_list + [OCTEON_ORDERED_SC_LIST].head); + spin_unlock_bh(&oct->response_list + [OCTEON_ORDERED_SC_LIST].lock); + } else { + if (sc->callback) { + sc->callback(oct, OCTEON_REQUEST_DONE, + sc->callback_arg); + } + } + break; + default: + dev_err(&oct->pci_dev->dev, + "%s Unknown reqtype: %d buf: %p at idx %d\n", + __func__, reqtype, buf, old); + } + + iq->request_list[old].buf = NULL; + iq->request_list[old].reqtype = 0; + + skip_this: + inst_count++; + INCR_INDEX_BY1(old, iq->max_count); + } + if (bytes_compl) + octeon_report_tx_completion_to_bql(iq->app_ctx, pkts_compl, + bytes_compl); + iq->flush_index = old; + + return inst_count; +} + +static inline void +update_iq_indices(struct octeon_device *oct, struct octeon_instr_queue *iq) +{ + u32 inst_processed = 0; + + /* Calculate how many commands Octeon has read and move the read index + * accordingly. + */ + iq->octeon_read_index = oct->fn_list.update_iq_read_idx(oct, iq); + + /* Move the NORESPONSE requests to the per-device completion list. */ + if (iq->flush_index != iq->octeon_read_index) + inst_processed = lio_process_iq_request_list(oct, iq); + + if (inst_processed) + atomic_sub(inst_processed, &iq->instr_pending); + iq->stats.instr_processed += inst_processed; +} + +static void +octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq, + u32 pending_thresh) +{ + if (atomic_read(&iq->instr_pending) >= (s32)pending_thresh) { + spin_lock_bh(&iq->lock); + update_iq_indices(oct, iq); + spin_unlock_bh(&iq->lock); + } +} + +static void __check_db_timeout(struct octeon_device *oct, unsigned long iq_no) +{ + struct octeon_instr_queue *iq; + u64 next_time; + + if (!oct) + return; + iq = oct->instr_queue[iq_no]; + if (!iq) + return; + + /* If jiffies - last_db_time < db_timeout do nothing */ + next_time = iq->last_db_time + iq->db_timeout; + if (!time_after(jiffies, (unsigned long)next_time)) + return; + iq->last_db_time = jiffies; + + /* Get the lock and prevent tasklets. This routine gets called from + * the poll thread. Instructions can now be posted in tasklet context + */ + spin_lock_bh(&iq->lock); + if (iq->fill_cnt != 0) + ring_doorbell(oct, iq); + + spin_unlock_bh(&iq->lock); + + /* Flush the instruction queue */ + if (iq->do_auto_flush) + octeon_flush_iq(oct, iq, 1); +} + +/* Called by the Poll thread at regular intervals to check the instruction + * queue for commands to be posted and for commands that were fetched by Octeon. 
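+ * Re-arms itself: each run queues the next check one millisecond out
+ * on the per-queue workqueue.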
+ */ +static void check_db_timeout(struct work_struct *work) +{ + struct cavium_wk *wk = (struct cavium_wk *)work; + struct octeon_device *oct = (struct octeon_device *)wk->ctxptr; + unsigned long iq_no = wk->ctxul; + struct cavium_wq *db_wq = &oct->check_db_wq[iq_no]; + + __check_db_timeout(oct, iq_no); + queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1)); +} + +int +octeon_send_command(struct octeon_device *oct, u32 iq_no, + u32 force_db, void *cmd, void *buf, + u32 datasize, u32 reqtype) +{ + struct iq_post_status st; + struct octeon_instr_queue *iq = oct->instr_queue[iq_no]; + + spin_lock_bh(&iq->lock); + + st = __post_command2(oct, iq, force_db, cmd); + + if (st.status != IQ_SEND_FAILED) { + octeon_report_sent_bytes_to_bql(buf, reqtype); + __add_to_request_list(iq, st.index, buf, reqtype); + INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, bytes_sent, datasize); + INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_posted, 1); + + if (iq->fill_cnt >= iq->fill_threshold || force_db) + ring_doorbell(oct, iq); + } else { + INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1); + } + + spin_unlock_bh(&iq->lock); + + if (iq->do_auto_flush) + octeon_flush_iq(oct, iq, 2); + + return st.status; +} + +void +octeon_prepare_soft_command(struct octeon_device *oct, + struct octeon_soft_command *sc, + u8 opcode, + u8 subcode, + u32 irh_ossp, + u64 ossp0, + u64 ossp1) +{ + struct octeon_config *oct_cfg; + struct octeon_instr_ih *ih; + struct octeon_instr_irh *irh; + struct octeon_instr_rdp *rdp; + + BUG_ON(opcode > 15); + BUG_ON(subcode > 127); + + oct_cfg = octeon_get_conf(oct); + + ih = (struct octeon_instr_ih *)&sc->cmd.ih; + ih->tagtype = ATOMIC_TAG; + ih->tag = LIO_CONTROL; + ih->raw = 1; + ih->grp = CFG_GET_CTRL_Q_GRP(oct_cfg); + + if (sc->datasize) { + ih->dlengsz = sc->datasize; + ih->rs = 1; + } + + irh = (struct octeon_instr_irh *)&sc->cmd.irh; + irh->opcode = opcode; + irh->subcode = subcode; + + /* opcode/subcode specific parameters (ossp) */ + irh->ossp = irh_ossp; + sc->cmd.ossp[0] = ossp0; + sc->cmd.ossp[1] = ossp1; + + if (sc->rdatasize) { + rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp; + rdp->pcie_port = oct->pcie_port; + rdp->rlen = sc->rdatasize; + + irh->rflag = 1; + irh->len = 4; + ih->fsz = 40; /* irh+ossp[0]+ossp[1]+rdp+rptr = 40 bytes */ + } else { + irh->rflag = 0; + irh->len = 2; + ih->fsz = 24; /* irh + ossp[0] + ossp[1] = 24 bytes */ + } + + while (!(oct->io_qmask.iq & (1 << sc->iq_no))) + sc->iq_no++; +} + +int octeon_send_soft_command(struct octeon_device *oct, + struct octeon_soft_command *sc) +{ + struct octeon_instr_ih *ih; + struct octeon_instr_irh *irh; + struct octeon_instr_rdp *rdp; + + ih = (struct octeon_instr_ih *)&sc->cmd.ih; + if (ih->dlengsz) { + BUG_ON(!sc->dmadptr); + sc->cmd.dptr = sc->dmadptr; + } + + irh = (struct octeon_instr_irh *)&sc->cmd.irh; + if (irh->rflag) { + BUG_ON(!sc->dmarptr); + BUG_ON(!sc->status_word); + *sc->status_word = COMPLETION_WORD_INIT; + + rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp; + + sc->cmd.rptr = sc->dmarptr; + } + + if (sc->wait_time) + sc->timeout = jiffies + sc->wait_time; + + return octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc, + (u32)ih->dlengsz, REQTYPE_SOFT_COMMAND); +} + +int octeon_setup_sc_buffer_pool(struct octeon_device *oct) +{ + int i; + u64 dma_addr; + struct octeon_soft_command *sc; + + INIT_LIST_HEAD(&oct->sc_buf_pool.head); + spin_lock_init(&oct->sc_buf_pool.lock); + atomic_set(&oct->sc_buf_pool.alloc_buf_count, 0); + + for (i = 0; i < MAX_SOFT_COMMAND_BUFFERS; i++) { + sc = (struct 
octeon_soft_command *) + lio_dma_alloc(oct, + SOFT_COMMAND_BUFFER_SIZE, + (dma_addr_t *)&dma_addr); + if (!sc) + return 1; + + sc->dma_addr = dma_addr; + sc->size = SOFT_COMMAND_BUFFER_SIZE; + + list_add_tail(&sc->node, &oct->sc_buf_pool.head); + } + + return 0; +} + +int octeon_free_sc_buffer_pool(struct octeon_device *oct) +{ + struct list_head *tmp, *tmp2; + struct octeon_soft_command *sc; + + spin_lock(&oct->sc_buf_pool.lock); + + list_for_each_safe(tmp, tmp2, &oct->sc_buf_pool.head) { + list_del(tmp); + + sc = (struct octeon_soft_command *)tmp; + + lio_dma_free(oct, sc->size, sc, sc->dma_addr); + } + + INIT_LIST_HEAD(&oct->sc_buf_pool.head); + + spin_unlock(&oct->sc_buf_pool.lock); + + return 0; +} + +struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct, + u32 datasize, + u32 rdatasize, + u32 ctxsize) +{ + u64 dma_addr; + u32 size; + u32 offset = sizeof(struct octeon_soft_command); + struct octeon_soft_command *sc = NULL; + struct list_head *tmp; + + BUG_ON((offset + datasize + rdatasize + ctxsize) > + SOFT_COMMAND_BUFFER_SIZE); + + spin_lock(&oct->sc_buf_pool.lock); + + if (list_empty(&oct->sc_buf_pool.head)) { + spin_unlock(&oct->sc_buf_pool.lock); + return NULL; + } + + list_for_each(tmp, &oct->sc_buf_pool.head) + break; + + list_del(tmp); + + atomic_inc(&oct->sc_buf_pool.alloc_buf_count); + + spin_unlock(&oct->sc_buf_pool.lock); + + sc = (struct octeon_soft_command *)tmp; + + dma_addr = sc->dma_addr; + size = sc->size; + + memset(sc, 0, sc->size); + + sc->dma_addr = dma_addr; + sc->size = size; + + if (ctxsize) { + sc->ctxptr = (u8 *)sc + offset; + sc->ctxsize = ctxsize; + } + + /* Start data at 128 byte boundary */ + offset = (offset + ctxsize + 127) & 0xffffff80; + + if (datasize) { + sc->virtdptr = (u8 *)sc + offset; + sc->dmadptr = dma_addr + offset; + sc->datasize = datasize; + } + + /* Start rdata at 128 byte boundary */ + offset = (offset + datasize + 127) & 0xffffff80; + + if (rdatasize) { + BUG_ON(rdatasize < 16); + sc->virtrptr = (u8 *)sc + offset; + sc->dmarptr = dma_addr + offset; + sc->rdatasize = rdatasize; + sc->status_word = (u64 *)((u8 *)(sc->virtrptr) + rdatasize - 8); + } + + return sc; +} + +void octeon_free_soft_command(struct octeon_device *oct, + struct octeon_soft_command *sc) +{ + spin_lock(&oct->sc_buf_pool.lock); + + list_add_tail(&sc->node, &oct->sc_buf_pool.head); + + atomic_dec(&oct->sc_buf_pool.alloc_buf_count); + + spin_unlock(&oct->sc_buf_pool.lock); +} diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.c b/drivers/net/ethernet/cavium/liquidio/response_manager.c new file mode 100644 index 000000000000..091f537a946e --- /dev/null +++ b/drivers/net/ethernet/cavium/liquidio/response_manager.c @@ -0,0 +1,178 @@ +/********************************************************************** + * Author: Cavium, Inc. + * + * Contact: support@cavium.com + * Please include "LiquidIO" in the subject. + * + * Copyright (c) 2003-2015 Cavium, Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. + * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium, Inc. 
for more information + **********************************************************************/ +#include <linux/version.h> +#include <linux/types.h> +#include <linux/list.h> +#include <linux/interrupt.h> +#include <linux/dma-mapping.h> +#include <linux/pci.h> +#include <linux/kthread.h> +#include <linux/netdevice.h> +#include "octeon_config.h" +#include "liquidio_common.h" +#include "octeon_droq.h" +#include "octeon_iq.h" +#include "response_manager.h" +#include "octeon_device.h" +#include "octeon_nic.h" +#include "octeon_main.h" +#include "octeon_network.h" +#include "cn66xx_regs.h" +#include "cn66xx_device.h" +#include "cn68xx_regs.h" +#include "cn68xx_device.h" +#include "liquidio_image.h" + +static void oct_poll_req_completion(struct work_struct *work); + +int octeon_setup_response_list(struct octeon_device *oct) +{ + int i, ret = 0; + struct cavium_wq *cwq; + + for (i = 0; i < MAX_RESPONSE_LISTS; i++) { + INIT_LIST_HEAD(&oct->response_list[i].head); + spin_lock_init(&oct->response_list[i].lock); + atomic_set(&oct->response_list[i].pending_req_count, 0); + } + + oct->dma_comp_wq.wq = create_workqueue("dma-comp"); + if (!oct->dma_comp_wq.wq) { + dev_err(&oct->pci_dev->dev, "failed to create wq thread\n"); + return -ENOMEM; + } + + cwq = &oct->dma_comp_wq; + INIT_DELAYED_WORK(&cwq->wk.work, oct_poll_req_completion); + cwq->wk.ctxptr = oct; + queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(100)); + + return ret; +} + +void octeon_delete_response_list(struct octeon_device *oct) +{ + cancel_delayed_work_sync(&oct->dma_comp_wq.wk.work); + flush_workqueue(oct->dma_comp_wq.wq); + destroy_workqueue(oct->dma_comp_wq.wq); +} + +int lio_process_ordered_list(struct octeon_device *octeon_dev, + u32 force_quit) +{ + struct octeon_response_list *ordered_sc_list; + struct octeon_soft_command *sc; + int request_complete = 0; + int resp_to_process = MAX_ORD_REQS_TO_PROCESS; + u32 status; + u64 status64; + struct octeon_instr_rdp *rdp; + + ordered_sc_list = &octeon_dev->response_list[OCTEON_ORDERED_SC_LIST]; + + do { + spin_lock_bh(&ordered_sc_list->lock); + + if (ordered_sc_list->head.next == &ordered_sc_list->head) { + /* ordered_sc_list is empty; there is + * nothing to process + */ + spin_unlock_bh + (&ordered_sc_list->lock); + return 1; + } + + sc = (struct octeon_soft_command *)ordered_sc_list-> + head.next; + rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp; + + status = OCTEON_REQUEST_PENDING; + + /* check if octeon has finished DMA'ing a response + * to where rptr is pointing to + */ + dma_sync_single_for_cpu(&octeon_dev->pci_dev->dev, + sc->cmd.rptr, rdp->rlen, + DMA_FROM_DEVICE); + status64 = *sc->status_word; + + if (status64 != COMPLETION_WORD_INIT) { + if ((status64 & 0xff) != 0xff) { + octeon_swap_8B_data(&status64, 1); + if (((status64 & 0xff) != 0xff)) { + status = (u32)(status64 & + 0xffffffffULL); + } + } + } else if (force_quit || (sc->timeout && + time_after(jiffies, (unsigned long)sc->timeout))) { + status = OCTEON_REQUEST_TIMEOUT; + } + + if (status != OCTEON_REQUEST_PENDING) { + /* we have received a response or we have timed out */ + /* remove node from linked list */ + list_del(&sc->node); + atomic_dec(&octeon_dev->response_list + [OCTEON_ORDERED_SC_LIST]. 
+				   pending_req_count);
+			spin_unlock_bh(&ordered_sc_list->lock);
+
+			if (sc->callback)
+				sc->callback(octeon_dev, status,
+					     sc->callback_arg);
+
+			request_complete++;
+
+		} else {
+			/* no response yet */
+			request_complete = 0;
+			spin_unlock_bh(&ordered_sc_list->lock);
+		}
+
+		/* If we hit the max ordered requests to process every loop,
+		 * we quit and let this function be invoked the next time the
+		 * poll thread runs, to process the remaining requests. This
+		 * function can take up the entire CPU if there is no upper
+		 * limit to the requests processed.
+		 */
+		if (request_complete >= resp_to_process)
+			break;
+	} while (request_complete);
+
+	return 0;
+}
+
+static void oct_poll_req_completion(struct work_struct *work)
+{
+	struct cavium_wk *wk = (struct cavium_wk *)work;
+	struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
+	struct cavium_wq *cwq = &oct->dma_comp_wq;
+
+	lio_process_ordered_list(oct, 0);
+
+	queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(100));
+}
diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.h b/drivers/net/ethernet/cavium/liquidio/response_manager.h
new file mode 100644
index 000000000000..7a48752dcb10
--- /dev/null
+++ b/drivers/net/ethernet/cavium/liquidio/response_manager.h
@@ -0,0 +1,140 @@
+/**********************************************************************
+ * Author: Cavium, Inc.
+ *
+ * Contact: support@cavium.com
+ * Please include "LiquidIO" in the subject.
+ *
+ * Copyright (c) 2003-2015 Cavium, Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium, Inc. for more information
+ **********************************************************************/
+
+/*! \file response_manager.h
+ * \brief Host Driver: Response queues for host instructions.
+ */
+
+#ifndef __RESPONSE_MANAGER_H__
+#define __RESPONSE_MANAGER_H__
+
+/** Maximum ordered requests to process in every invocation of
+ * lio_process_ordered_list(). The function will continue to process requests
+ * as long as it can find one that has finished processing. If it keeps
+ * finding requests that have completed, the function can run forever. The
+ * value defined here sets an upper limit on the number of requests it can
+ * process before it returns control to the poll thread.
+ */
+#define MAX_ORD_REQS_TO_PROCESS   4096
+
+/** Head of a response list. There are several response lists in the
+ * system. One for each response order- Unordered, ordered
+ * and 1 for noresponse entries on each instruction queue.
+ */
+struct octeon_response_list {
+	/** List structure to add delete pending entries to */
+	struct list_head head;
+
+	/** A lock for this response list */
+	spinlock_t lock;
+
+	atomic_t pending_req_count;
+};
+
+/** The type of response list.
+ */
+enum {
+	OCTEON_ORDERED_LIST = 0,
+	OCTEON_UNORDERED_NONBLOCKING_LIST = 1,
+	OCTEON_UNORDERED_BLOCKING_LIST = 2,
+	OCTEON_ORDERED_SC_LIST = 3
+};
+
+/** Response Order values for an Octeon Request.
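+ * ORDERED responses complete in the order the requests were posted;
+ * UNORDERED responses may complete in any order; NORESPONSE requests
+ * expect no reply at all.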
+ */
+enum {
+	OCTEON_RESP_ORDERED = 0,
+	OCTEON_RESP_UNORDERED = 1,
+	OCTEON_RESP_NORESPONSE = 2
+};
+
+/** Error codes used in Octeon Host-Core communication.
+ *
+ *   31            16 15            0
+ *   ---------------------------------
+ *   |               |               |
+ *   ---------------------------------
+ *   Error codes are 32-bit wide. The upper 16-bits, called Major Error Number,
+ *   are reserved to identify the group to which the error code belongs. The
+ *   lower 16-bits, called Minor Error Number, carry the actual code.
+ *
+ *   So error codes are (MAJOR NUMBER << 16) | MINOR_NUMBER.
+ */
+
+/*------------   Error codes used by host driver   -----------------*/
+#define DRIVER_MAJOR_ERROR_CODE           0x0000
+
+/**  A value of 0x00000000 indicates no error i.e. success */
+#define DRIVER_ERROR_NONE                 0x00000000
+
+/**  (Major number: 0x0000; Minor Number: 0x0001) */
+#define DRIVER_ERROR_REQ_PENDING          0x00000001
+#define DRIVER_ERROR_REQ_TIMEOUT          0x00000003
+#define DRIVER_ERROR_REQ_EINTR            0x00000004
+#define DRIVER_ERROR_REQ_ENXIO            0x00000006
+#define DRIVER_ERROR_REQ_ENOMEM           0x0000000C
+#define DRIVER_ERROR_REQ_EINVAL           0x00000016
+#define DRIVER_ERROR_REQ_FAILED           0x000000ff
+
+/** Status for a request.
+ * If a request is not queued to Octeon by the driver, the driver returns
+ * an error condition that's described by one of the OCTEON_REQ_ERR_* values
+ * below. If the request is successfully queued, the driver will return
+ * an OCTEON_REQUEST_PENDING status. OCTEON_REQUEST_TIMEOUT and
+ * OCTEON_REQUEST_INTERRUPTED are only returned by the driver if the
+ * response for the request failed to arrive before a time-out period or if
+ * the request processing got interrupted due to a signal, respectively.
+ */
+enum {
+	OCTEON_REQUEST_DONE = (DRIVER_ERROR_NONE),
+	OCTEON_REQUEST_PENDING = (DRIVER_ERROR_REQ_PENDING),
+	OCTEON_REQUEST_TIMEOUT = (DRIVER_ERROR_REQ_TIMEOUT),
+	OCTEON_REQUEST_INTERRUPTED = (DRIVER_ERROR_REQ_EINTR),
+	OCTEON_REQUEST_NO_DEVICE = (0x00000021),
+	OCTEON_REQUEST_NOT_RUNNING,
+	OCTEON_REQUEST_INVALID_IQ,
+	OCTEON_REQUEST_INVALID_BUFCNT,
+	OCTEON_REQUEST_INVALID_RESP_ORDER,
+	OCTEON_REQUEST_NO_MEMORY,
+	OCTEON_REQUEST_INVALID_BUFSIZE,
+	OCTEON_REQUEST_NO_PENDING_ENTRY,
+	OCTEON_REQUEST_NO_IQ_SPACE = (0x7FFFFFFF)
+
+};
+
+/** Initialize the response lists. The number of response lists to create is
+ * given by count.
+ * @param octeon_dev      - the octeon device structure.
+ */
+int octeon_setup_response_list(struct octeon_device *octeon_dev);
+
+void octeon_delete_response_list(struct octeon_device *octeon_dev);
+
+/** Check the status of first entry in the ordered list. If the instruction at
+ * that entry finished processing or has timed-out, the entry is cleaned.
+ * @param octeon_dev      - the octeon device structure.
+ * @param force_quit      - the request is forced to timeout if this is 1
+ * @return 1 if the ordered list is empty, 0 otherwise.
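+ * Invoked every 100 ms from oct_poll_req_completion(), and processes at
+ * most MAX_ORD_REQS_TO_PROCESS entries per call.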
+ */ +int lio_process_ordered_list(struct octeon_device *octeon_dev, + u32 force_quit); + +#endif diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index 4d627a8f04b0..2e70228272b0 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -1337,6 +1337,10 @@ int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n, int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n, const unsigned int *valp); int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr); +void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp, + unsigned int *pif_req_wrptr, + unsigned int *pif_rsp_wrptr); +void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp); void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres); const char *t4_get_port_type_description(enum fw_port_type port_type); void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p); @@ -1362,6 +1366,7 @@ void t4_load_mtus(struct adapter *adap, const unsigned short *mtus, void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf); +void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate); void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid); void t4_wol_magic_enable(struct adapter *adap, unsigned int port, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index 3719807efddd..484eb8c37489 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -182,6 +182,95 @@ static const struct file_operations cim_la_fops = { .release = seq_release_private }; +static int cim_pif_la_show(struct seq_file *seq, void *v, int idx) +{ + const u32 *p = v; + + if (v == SEQ_START_TOKEN) { + seq_puts(seq, "Cntl ID DataBE Addr Data\n"); + } else if (idx < CIM_PIFLA_SIZE) { + seq_printf(seq, " %02x %02x %04x %08x %08x%08x%08x%08x\n", + (p[5] >> 22) & 0xff, (p[5] >> 16) & 0x3f, + p[5] & 0xffff, p[4], p[3], p[2], p[1], p[0]); + } else { + if (idx == CIM_PIFLA_SIZE) + seq_puts(seq, "\nCntl ID Data\n"); + seq_printf(seq, " %02x %02x %08x%08x%08x%08x\n", + (p[4] >> 6) & 0xff, p[4] & 0x3f, + p[3], p[2], p[1], p[0]); + } + return 0; +} + +static int cim_pif_la_open(struct inode *inode, struct file *file) +{ + struct seq_tab *p; + struct adapter *adap = inode->i_private; + + p = seq_open_tab(file, 2 * CIM_PIFLA_SIZE, 6 * sizeof(u32), 1, + cim_pif_la_show); + if (!p) + return -ENOMEM; + + t4_cim_read_pif_la(adap, (u32 *)p->data, + (u32 *)p->data + 6 * CIM_PIFLA_SIZE, NULL, NULL); + return 0; +} + +static const struct file_operations cim_pif_la_fops = { + .owner = THIS_MODULE, + .open = cim_pif_la_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release_private +}; + +static int cim_ma_la_show(struct seq_file *seq, void *v, int idx) +{ + const u32 *p = v; + + if (v == SEQ_START_TOKEN) { + seq_puts(seq, "\n"); + } else if (idx < CIM_MALA_SIZE) { + seq_printf(seq, "%02x%08x%08x%08x%08x\n", + p[4], p[3], p[2], p[1], p[0]); + } else { + if (idx == CIM_MALA_SIZE) + seq_puts(seq, + "\nCnt ID Tag UE Data RDY VLD\n"); + seq_printf(seq, "%3u %2u %x %u %08x%08x %u %u\n", + (p[2] >> 10) & 0xff, (p[2] >> 7) & 7, + (p[2] >> 3) & 0xf, (p[2] >> 2) & 1, + (p[1] >> 2) | ((p[2] & 3) << 30), + (p[0] >> 2) | ((p[1] & 3) << 30), (p[0] >> 1) & 1, + p[0] & 1); + } + return 0; +} + +static int 
cim_ma_la_open(struct inode *inode, struct file *file) +{ + struct seq_tab *p; + struct adapter *adap = inode->i_private; + + p = seq_open_tab(file, 2 * CIM_MALA_SIZE, 5 * sizeof(u32), 1, + cim_ma_la_show); + if (!p) + return -ENOMEM; + + t4_cim_read_ma_la(adap, (u32 *)p->data, + (u32 *)p->data + 5 * CIM_MALA_SIZE); + return 0; +} + +static const struct file_operations cim_ma_la_fops = { + .owner = THIS_MODULE, + .open = cim_ma_la_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release_private +}; + static int cim_qcfg_show(struct seq_file *seq, void *v) { static const char * const qname[] = { @@ -663,6 +752,39 @@ static const struct file_operations pm_stats_debugfs_fops = { .write = pm_stats_clear }; +static int tx_rate_show(struct seq_file *seq, void *v) +{ + u64 nrate[NCHAN], orate[NCHAN]; + struct adapter *adap = seq->private; + + t4_get_chan_txrate(adap, nrate, orate); + if (adap->params.arch.nchan == NCHAN) { + seq_puts(seq, " channel 0 channel 1 " + "channel 2 channel 3\n"); + seq_printf(seq, "NIC B/s: %10llu %10llu %10llu %10llu\n", + (unsigned long long)nrate[0], + (unsigned long long)nrate[1], + (unsigned long long)nrate[2], + (unsigned long long)nrate[3]); + seq_printf(seq, "Offload B/s: %10llu %10llu %10llu %10llu\n", + (unsigned long long)orate[0], + (unsigned long long)orate[1], + (unsigned long long)orate[2], + (unsigned long long)orate[3]); + } else { + seq_puts(seq, " channel 0 channel 1\n"); + seq_printf(seq, "NIC B/s: %10llu %10llu\n", + (unsigned long long)nrate[0], + (unsigned long long)nrate[1]); + seq_printf(seq, "Offload B/s: %10llu %10llu\n", + (unsigned long long)orate[0], + (unsigned long long)orate[1]); + } + return 0; +} + +DEFINE_SIMPLE_DEBUGFS_FILE(tx_rate); + static int cctrl_tbl_show(struct seq_file *seq, void *v) { static const char * const dec_fac[] = { @@ -2128,6 +2250,8 @@ int t4_setup_debugfs(struct adapter *adap) static struct t4_debugfs_entry t4_debugfs_files[] = { { "cim_la", &cim_la_fops, S_IRUSR, 0 }, + { "cim_pif_la", &cim_pif_la_fops, S_IRUSR, 0 }, + { "cim_ma_la", &cim_ma_la_fops, S_IRUSR, 0 }, { "cim_qcfg", &cim_qcfg_fops, S_IRUSR, 0 }, { "clk", &clk_debugfs_fops, S_IRUSR, 0 }, { "devlog", &devlog_fops, S_IRUSR, 0 }, @@ -2163,6 +2287,7 @@ int t4_setup_debugfs(struct adapter *adap) { "ulprx_la", &ulprx_la_fops, S_IRUSR, 0 }, { "sensors", &sensors_debugfs_fops, S_IRUSR, 0 }, { "pm_stats", &pm_stats_debugfs_fops, S_IRUSR, 0 }, + { "tx_rate", &tx_rate_debugfs_fops, S_IRUSR, 0 }, { "cctrl", &cctrl_tbl_debugfs_fops, S_IRUSR, 0 }, #if IS_ENABLED(CONFIG_IPV6) { "clip_tbl", &clip_tbl_debugfs_fops, S_IRUSR, 0 }, diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index fdda0f8c5a19..9d93f4c27300 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -2586,6 +2586,61 @@ int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op) return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL); } +void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp, + unsigned int *pif_req_wrptr, + unsigned int *pif_rsp_wrptr) +{ + int i, j; + u32 cfg, val, req, rsp; + + cfg = t4_read_reg(adap, CIM_DEBUGCFG_A); + if (cfg & LADBGEN_F) + t4_write_reg(adap, CIM_DEBUGCFG_A, cfg ^ LADBGEN_F); + + val = t4_read_reg(adap, CIM_DEBUGSTS_A); + req = POLADBGWRPTR_G(val); + rsp = PILADBGWRPTR_G(val); + if (pif_req_wrptr) + *pif_req_wrptr = req; + if (pif_rsp_wrptr) + *pif_rsp_wrptr = rsp; + + for (i = 0; i < CIM_PIFLA_SIZE; i++) { + for (j 
= 0; j < 6; j++) { + t4_write_reg(adap, CIM_DEBUGCFG_A, POLADBGRDPTR_V(req) | + PILADBGRDPTR_V(rsp)); + *pif_req++ = t4_read_reg(adap, CIM_PO_LA_DEBUGDATA_A); + *pif_rsp++ = t4_read_reg(adap, CIM_PI_LA_DEBUGDATA_A); + req++; + rsp++; + } + req = (req + 2) & POLADBGRDPTR_M; + rsp = (rsp + 2) & PILADBGRDPTR_M; + } + t4_write_reg(adap, CIM_DEBUGCFG_A, cfg); +} + +void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp) +{ + u32 cfg; + int i, j, idx; + + cfg = t4_read_reg(adap, CIM_DEBUGCFG_A); + if (cfg & LADBGEN_F) + t4_write_reg(adap, CIM_DEBUGCFG_A, cfg ^ LADBGEN_F); + + for (i = 0; i < CIM_MALA_SIZE; i++) { + for (j = 0; j < 5; j++) { + idx = 8 * i + j; + t4_write_reg(adap, CIM_DEBUGCFG_A, POLADBGRDPTR_V(idx) | + PILADBGRDPTR_V(idx)); + *ma_req++ = t4_read_reg(adap, CIM_PO_LA_MADEBUGDATA_A); + *ma_rsp++ = t4_read_reg(adap, CIM_PI_LA_MADEBUGDATA_A); + } + } + t4_write_reg(adap, CIM_DEBUGCFG_A, cfg); +} + void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf) { unsigned int i, j; @@ -4136,6 +4191,52 @@ void t4_load_mtus(struct adapter *adap, const unsigned short *mtus, } } +/* Calculates a rate in bytes/s given the number of 256-byte units per 4K core + * clocks. The formula is + * + * bytes/s = bytes256 * 256 * ClkFreq / 4096 + * + * which is equivalent to + * + * bytes/s = 62.5 * bytes256 * ClkFreq_ms + */ +static u64 chan_rate(struct adapter *adap, unsigned int bytes256) +{ + u64 v = bytes256 * adap->params.vpd.cclk; + + return v * 62 + v / 2; +} + +/** + * t4_get_chan_txrate - get the current per channel Tx rates + * @adap: the adapter + * @nic_rate: rates for NIC traffic + * @ofld_rate: rates for offloaded traffic + * + * Return the current Tx rates in bytes/s for NIC and offloaded traffic + * for each channel. + */ +void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate) +{ + u32 v; + + v = t4_read_reg(adap, TP_TX_TRATE_A); + nic_rate[0] = chan_rate(adap, TNLRATE0_G(v)); + nic_rate[1] = chan_rate(adap, TNLRATE1_G(v)); + if (adap->params.arch.nchan == NCHAN) { + nic_rate[2] = chan_rate(adap, TNLRATE2_G(v)); + nic_rate[3] = chan_rate(adap, TNLRATE3_G(v)); + } + + v = t4_read_reg(adap, TP_TX_ORATE_A); + ofld_rate[0] = chan_rate(adap, OFDRATE0_G(v)); + ofld_rate[1] = chan_rate(adap, OFDRATE1_G(v)); + if (adap->params.arch.nchan == NCHAN) { + ofld_rate[2] = chan_rate(adap, OFDRATE2_G(v)); + ofld_rate[3] = chan_rate(adap, OFDRATE3_G(v)); + } +} + /** * t4_pmtx_get_stats - returns the HW stats from PMTX * @adap: the adapter diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h index f9a2cb164737..c8488f430d19 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h @@ -59,6 +59,8 @@ enum { CIM_NUM_OBQ = 6, /* # of CIM OBQs */ CIM_NUM_OBQ_T5 = 8, /* # of CIM OBQs for T5 adapter */ CIMLA_SIZE = 2048, /* # of 32-bit words in CIM LA */ + CIM_PIFLA_SIZE = 64, /* # of 192-bit words in CIM PIF LA */ + CIM_MALA_SIZE = 64, /* # of 160-bit words in CIM MA LA */ CIM_IBQ_SIZE = 128, /* # of 128-bit words in a CIM IBQ */ CIM_OBQ_SIZE = 128, /* # of 128-bit words in a CIM OBQ */ TPLA_SIZE = 128, /* # of 64-bit words in TP LA */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index af3462db5adb..375a825573b0 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h @@ -1361,6 +1361,42 @@ #define FLMTXFLSTEMPTY_V(x) ((x) << FLMTXFLSTEMPTY_S) #define FLMTXFLSTEMPTY_F 
FLMTXFLSTEMPTY_V(1U) +#define TP_TX_ORATE_A 0x7ebc + +#define OFDRATE3_S 24 +#define OFDRATE3_M 0xffU +#define OFDRATE3_G(x) (((x) >> OFDRATE3_S) & OFDRATE3_M) + +#define OFDRATE2_S 16 +#define OFDRATE2_M 0xffU +#define OFDRATE2_G(x) (((x) >> OFDRATE2_S) & OFDRATE2_M) + +#define OFDRATE1_S 8 +#define OFDRATE1_M 0xffU +#define OFDRATE1_G(x) (((x) >> OFDRATE1_S) & OFDRATE1_M) + +#define OFDRATE0_S 0 +#define OFDRATE0_M 0xffU +#define OFDRATE0_G(x) (((x) >> OFDRATE0_S) & OFDRATE0_M) + +#define TP_TX_TRATE_A 0x7ed0 + +#define TNLRATE3_S 24 +#define TNLRATE3_M 0xffU +#define TNLRATE3_G(x) (((x) >> TNLRATE3_S) & TNLRATE3_M) + +#define TNLRATE2_S 16 +#define TNLRATE2_M 0xffU +#define TNLRATE2_G(x) (((x) >> TNLRATE2_S) & TNLRATE2_M) + +#define TNLRATE1_S 8 +#define TNLRATE1_M 0xffU +#define TNLRATE1_G(x) (((x) >> TNLRATE1_S) & TNLRATE1_M) + +#define TNLRATE0_S 0 +#define TNLRATE0_M 0xffU +#define TNLRATE0_G(x) (((x) >> TNLRATE0_S) & TNLRATE0_M) + #define TP_VLAN_PRI_MAP_A 0x140 #define FRAGMENTATION_S 9 @@ -2759,6 +2795,33 @@ #define CIM_IBQ_DBG_DATA_A 0x7b68 #define CIM_OBQ_DBG_DATA_A 0x7b6c +#define CIM_DEBUGCFG_A 0x7b70 +#define CIM_DEBUGSTS_A 0x7b74 + +#define POLADBGRDPTR_S 23 +#define POLADBGRDPTR_M 0x1ffU +#define POLADBGRDPTR_V(x) ((x) << POLADBGRDPTR_S) + +#define POLADBGWRPTR_S 16 +#define POLADBGWRPTR_M 0x1ffU +#define POLADBGWRPTR_G(x) (((x) >> POLADBGWRPTR_S) & POLADBGWRPTR_M) + +#define PILADBGRDPTR_S 14 +#define PILADBGRDPTR_M 0x1ffU +#define PILADBGRDPTR_V(x) ((x) << PILADBGRDPTR_S) + +#define PILADBGWRPTR_S 0 +#define PILADBGWRPTR_M 0x1ffU +#define PILADBGWRPTR_G(x) (((x) >> PILADBGWRPTR_S) & PILADBGWRPTR_M) + +#define LADBGEN_S 12 +#define LADBGEN_V(x) ((x) << LADBGEN_S) +#define LADBGEN_F LADBGEN_V(1U) + +#define CIM_PO_LA_DEBUGDATA_A 0x7b78 +#define CIM_PI_LA_DEBUGDATA_A 0x7b7c +#define CIM_PO_LA_MADEBUGDATA_A 0x7b80 +#define CIM_PI_LA_MADEBUGDATA_A 0x7b84 #define UPDBGLARDEN_S 1 #define UPDBGLARDEN_V(x) ((x) << UPDBGLARDEN_S) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 636f9e350162..ac3ac2a20386 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -643,6 +643,7 @@ struct ixgbe_adapter { #define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 8) #define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9) #define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 10) +#define IXGBE_FLAG2_PHY_INTERRUPT (u32)(1 << 11) /* Tx fast path data */ int num_tx_queues; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c index 824a7ab79ab6..65db69b862fb 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c @@ -1225,7 +1225,7 @@ static struct ixgbe_phy_operations phy_ops_82598 = { .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_82598, .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598, - .check_overtemp = &ixgbe_tn_check_overtemp, + .check_overtemp = &ixgbe_tn_check_overtemp, }; struct ixgbe_info ixgbe_82598_info = { @@ -1234,4 +1234,5 @@ struct ixgbe_info ixgbe_82598_info = { .mac_ops = &mac_ops_82598, .eeprom_ops = &eeprom_ops_82598, .phy_ops = &phy_ops_82598, + .mvals = ixgbe_mvals_8259X, }; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index e0c363948bf4..6b87d9634614 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c 
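/* [Editor's note, illustrative] t4_get_chan_txrate() earlier scales the 8-bit
 * per-channel TNLRATE/OFDRATE fields of TP_TX_TRATE_A/TP_TX_ORATE_A (256-byte
 * units per 4096 core clocks) to bytes/s. With cclk in kHz:
 *   bytes/s = bytes256 * 256 * (cclk * 1000) / 4096 = 62.5 * bytes256 * cclk
 * and chan_rate() computes 62.5 * v in integer math as v * 62 + v / 2. A
 * standalone check of that identity (values are hypothetical):
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t chan_rate(uint64_t bytes256, uint64_t cclk_khz)
{
	uint64_t v = bytes256 * cclk_khz;

	return v * 62 + v / 2;	/* 62.5 * v, rounded down when v is odd */
}

int main(void)
{
	/* 100 units of 256 B per 4096 clocks at a 500 MHz (500000 kHz) core */
	uint64_t exact = 100ULL * 256 * 500000000 / 4096;

	printf("%llu == %llu\n",
	       (unsigned long long)chan_rate(100, 500000),
	       (unsigned long long)exact);	/* both print 3125000000 */
	return 0;
}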
@@ -71,7 +71,7 @@ bool ixgbe_mng_enabled(struct ixgbe_hw *hw) { u32 fwsm, manc, factps; - fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM); + fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw)); if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT) return false; @@ -79,7 +79,7 @@ bool ixgbe_mng_enabled(struct ixgbe_hw *hw) if (!(manc & IXGBE_MANC_RCV_TCO_EN)) return false; - factps = IXGBE_READ_REG(hw, IXGBE_FACTPS); + factps = IXGBE_READ_REG(hw, IXGBE_FACTPS(hw)); if (factps & IXGBE_FACTPS_MNGCG) return false; @@ -510,7 +510,7 @@ static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw) hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2); /* Check to see if MNG FW could be enabled */ - fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM); + fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw)); if (((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT) && !hw->wol_enabled && @@ -2378,4 +2378,5 @@ struct ixgbe_info ixgbe_82599_info = { .eeprom_ops = &eeprom_ops_82599, .phy_ops = &phy_ops_82599, .mbx_ops = &mbx_ops_generic, + .mvals = ixgbe_mvals_8259X, }; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 06d8f3cfa099..4c1c26732b67 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c @@ -57,6 +57,11 @@ static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, u16 offset); static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw); +/* Base table for registers values that change by MAC */ +const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT] = { + IXGBE_MVALS_INIT(8259X) +}; + /** * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow * control @@ -91,6 +96,8 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) case IXGBE_DEV_ID_82599_T3_LOM: case IXGBE_DEV_ID_X540T: case IXGBE_DEV_ID_X540T1: + case IXGBE_DEV_ID_X550T: + case IXGBE_DEV_ID_X550EM_X_10G_T: supported = true; break; default: @@ -463,7 +470,7 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw) } } - if (hw->mac.type == ixgbe_mac_X540) { + if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) { if (hw->phy.id == 0) hw->phy.ops.identify(hw); hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL, MDIO_MMD_PCS, &i); @@ -681,7 +688,7 @@ void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw) bus->lan_id = bus->func; /* check for a port swap */ - reg = IXGBE_READ_REG(hw, IXGBE_FACTPS); + reg = IXGBE_READ_REG(hw, IXGBE_FACTPS(hw)); if (reg & IXGBE_FACTPS_LFS) bus->func ^= 0x1; } @@ -799,7 +806,7 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw) * Check for EEPROM present first. 
* If not present leave as none */ - eec = IXGBE_READ_REG(hw, IXGBE_EEC); + eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); if (eec & IXGBE_EEC_PRES) { eeprom->type = ixgbe_eeprom_spi; @@ -1283,14 +1290,14 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw) if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0) return IXGBE_ERR_SWFW_SYNC; - eec = IXGBE_READ_REG(hw, IXGBE_EEC); + eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); /* Request EEPROM Access */ eec |= IXGBE_EEC_REQ; - IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) { - eec = IXGBE_READ_REG(hw, IXGBE_EEC); + eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); if (eec & IXGBE_EEC_GNT) break; udelay(5); @@ -1299,7 +1306,7 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw) /* Release if grant not acquired */ if (!(eec & IXGBE_EEC_GNT)) { eec &= ~IXGBE_EEC_REQ; - IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); hw_dbg(hw, "Could not acquire EEPROM grant\n"); hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); @@ -1309,7 +1316,7 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw) /* Setup EEPROM for Read/Write */ /* Clear CS and SK */ eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK); - IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); IXGBE_WRITE_FLUSH(hw); udelay(1); return 0; @@ -1333,7 +1340,7 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw) * If the SMBI bit is 0 when we read it, then the bit will be * set and we have the semaphore */ - swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw)); if (!(swsm & IXGBE_SWSM_SMBI)) break; usleep_range(50, 100); @@ -1353,7 +1360,7 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw) * If the SMBI bit is 0 when we read it, then the bit will be * set and we have the semaphore */ - swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw)); if (swsm & IXGBE_SWSM_SMBI) { hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n"); return IXGBE_ERR_EEPROM; @@ -1362,16 +1369,16 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw) /* Now get the semaphore between SW/FW through the SWESMBI bit */ for (i = 0; i < timeout; i++) { - swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw)); /* Set the SW EEPROM semaphore bit to request access */ swsm |= IXGBE_SWSM_SWESMBI; - IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm); + IXGBE_WRITE_REG(hw, IXGBE_SWSM(hw), swsm); /* If we set the bit successfully then we got the * semaphore. 
*/ - swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw)); if (swsm & IXGBE_SWSM_SWESMBI) break; @@ -1400,11 +1407,11 @@ static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw) { u32 swsm; - swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw)); /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */ swsm &= ~(IXGBE_SWSM_SWESMBI | IXGBE_SWSM_SMBI); - IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm); + IXGBE_WRITE_REG(hw, IXGBE_SWSM(hw), swsm); IXGBE_WRITE_FLUSH(hw); } @@ -1454,15 +1461,15 @@ static void ixgbe_standby_eeprom(struct ixgbe_hw *hw) { u32 eec; - eec = IXGBE_READ_REG(hw, IXGBE_EEC); + eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); /* Toggle CS to flush commands */ eec |= IXGBE_EEC_CS; - IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); IXGBE_WRITE_FLUSH(hw); udelay(1); eec &= ~IXGBE_EEC_CS; - IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); IXGBE_WRITE_FLUSH(hw); udelay(1); } @@ -1480,7 +1487,7 @@ static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, u32 mask; u32 i; - eec = IXGBE_READ_REG(hw, IXGBE_EEC); + eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); /* * Mask is used to shift "count" bits of "data" out to the EEPROM @@ -1501,7 +1508,7 @@ static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, else eec &= ~IXGBE_EEC_DI; - IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); IXGBE_WRITE_FLUSH(hw); udelay(1); @@ -1518,7 +1525,7 @@ static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, /* We leave the "DI" bit set to "0" when we leave this routine. */ eec &= ~IXGBE_EEC_DI; - IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); IXGBE_WRITE_FLUSH(hw); } @@ -1539,7 +1546,7 @@ static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count) * the value of the "DO" bit. During this "shifting in" process the * "DI" bit should always be clear. 
*/ - eec = IXGBE_READ_REG(hw, IXGBE_EEC); + eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI); @@ -1547,7 +1554,7 @@ static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count) data = data << 1; ixgbe_raise_eeprom_clk(hw, &eec); - eec = IXGBE_READ_REG(hw, IXGBE_EEC); + eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); eec &= ~(IXGBE_EEC_DI); if (eec & IXGBE_EEC_DO) @@ -1571,7 +1578,7 @@ static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) * (setting the SK bit), then delay */ *eec = *eec | IXGBE_EEC_SK; - IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec); + IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), *eec); IXGBE_WRITE_FLUSH(hw); udelay(1); } @@ -1588,7 +1595,7 @@ static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) * delay */ *eec = *eec & ~IXGBE_EEC_SK; - IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec); + IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), *eec); IXGBE_WRITE_FLUSH(hw); udelay(1); } @@ -1601,19 +1608,19 @@ static void ixgbe_release_eeprom(struct ixgbe_hw *hw) { u32 eec; - eec = IXGBE_READ_REG(hw, IXGBE_EEC); + eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); eec |= IXGBE_EEC_CS; /* Pull CS high */ eec &= ~IXGBE_EEC_SK; /* Lower SCK */ - IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); IXGBE_WRITE_FLUSH(hw); udelay(1); /* Stop requesting EEPROM access */ eec &= ~IXGBE_EEC_REQ; - IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); + IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), eec); hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h index f21f8a165ec4..ec015fed8fa7 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h @@ -118,6 +118,8 @@ bool ixgbe_mng_enabled(struct ixgbe_hw *hw); void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom, int strategy); +extern const u32 ixgbe_mvals_8259X[IXGBE_MVALS_IDX_LIMIT]; + #define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8 #define IXGBE_EMC_INTERNAL_DATA 0x00 #define IXGBE_EMC_INTERNAL_THERM_LIMIT 0x20 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 9a1d0f142b09..ec7b2324b77b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -207,6 +207,7 @@ static int ixgbe_get_settings(struct net_device *netdev, switch (adapter->hw.phy.type) { case ixgbe_phy_tn: case ixgbe_phy_aq: + case ixgbe_phy_x550em_ext_t: case ixgbe_phy_cu_unknown: ecmd->supported |= SUPPORTED_TP; ecmd->advertising |= ADVERTISED_TP; @@ -470,16 +471,16 @@ static void ixgbe_get_regs(struct net_device *netdev, regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER); /* NVM Register */ - regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC); + regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD); - regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA); + regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA(hw)); regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL); regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA); regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL); regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA); regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT); regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP); - regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC); + regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC(hw)); /* Interrupt */ /* don't read EICR because it can clear interrupt causes, instead diff --git 
a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 23d82b34314e..3bf2f3cfd9f6 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -81,6 +81,8 @@ const char ixgbe_driver_version[] = DRV_VERSION; static const char ixgbe_copyright[] = "Copyright (c) 1999-2014 Intel Corporation."; +static const char ixgbe_overheat_msg[] = "Network adapter has been stopped because it has over heated. Restart the computer. If the problem persists, power off the system and replace the adapter"; + static const struct ixgbe_info *ixgbe_info_tbl[] = { [board_82598] = &ixgbe_82598_info, [board_82599] = &ixgbe_82599_info, @@ -131,6 +133,7 @@ static const struct pci_device_id ixgbe_pci_tbl[] = { {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550}, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x}, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x}, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x}, /* required last entry */ {0, } }; @@ -2366,7 +2369,7 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter) * - We may have missed the interrupt so always have to * check if we got a LSC */ - if (!(eicr & IXGBE_EICR_GPI_SDP0) && + if (!(eicr & IXGBE_EICR_GPI_SDP0_8259X) && !(eicr & IXGBE_EICR_LSC)) return; @@ -2386,14 +2389,13 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter) break; default: - if (!(eicr & IXGBE_EICR_GPI_SDP0)) + if (adapter->hw.mac.type >= ixgbe_mac_X540) + return; + if (!(eicr & IXGBE_EICR_GPI_SDP0(hw))) return; break; } - e_crit(drv, - "Network adapter has been stopped because it has over heated. " - "Restart the computer. If the problem persists, " - "power off the system and replace the adapter\n"); + e_crit(drv, "%s\n", ixgbe_overheat_msg); adapter->interrupt_event = 0; } @@ -2403,15 +2405,17 @@ static void ixgbe_check_fan_failure(struct ixgbe_adapter *adapter, u32 eicr) struct ixgbe_hw *hw = &adapter->hw; if ((adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) && - (eicr & IXGBE_EICR_GPI_SDP1)) { + (eicr & IXGBE_EICR_GPI_SDP1(hw))) { e_crit(probe, "Fan has stopped, replace the adapter\n"); /* write to clear the interrupt */ - IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw)); } } static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr) { + struct ixgbe_hw *hw = &adapter->hw; + if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)) return; @@ -2421,7 +2425,8 @@ static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr) * Need to check link state so complete overtemp check * on service task */ - if (((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)) && + if (((eicr & IXGBE_EICR_GPI_SDP0(hw)) || + (eicr & IXGBE_EICR_LSC)) && (!test_bit(__IXGBE_DOWN, &adapter->state))) { adapter->interrupt_event = eicr; adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT; @@ -2437,28 +2442,46 @@ static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr) return; } - e_crit(drv, - "Network adapter has been stopped because it has over heated. " - "Restart the computer. 
If the problem persists, " - "power off the system and replace the adapter\n"); + e_crit(drv, "%s\n", ixgbe_overheat_msg); +} + +static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw) +{ + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + if (hw->phy.type == ixgbe_phy_nl) + return true; + return false; + case ixgbe_mac_82599EB: + case ixgbe_mac_X550EM_x: + switch (hw->mac.ops.get_media_type(hw)) { + case ixgbe_media_type_fiber: + case ixgbe_media_type_fiber_qsfp: + return true; + default: + return false; + } + default: + return false; + } } static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr) { struct ixgbe_hw *hw = &adapter->hw; - if (eicr & IXGBE_EICR_GPI_SDP2) { + if (eicr & IXGBE_EICR_GPI_SDP2(hw)) { /* Clear the interrupt */ - IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2); + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2(hw)); if (!test_bit(__IXGBE_DOWN, &adapter->state)) { adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; ixgbe_service_event_schedule(adapter); } } - if (eicr & IXGBE_EICR_GPI_SDP1) { + if (eicr & IXGBE_EICR_GPI_SDP1(hw)) { /* Clear the interrupt */ - IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1(hw)); if (!test_bit(__IXGBE_DOWN, &adapter->state)) { adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; ixgbe_service_event_schedule(adapter); @@ -2543,6 +2566,7 @@ static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter, static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, bool flush) { + struct ixgbe_hw *hw = &adapter->hw; u32 mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE); /* don't reenable LSC while waiting for link */ @@ -2552,7 +2576,7 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: - mask |= IXGBE_EIMS_GPI_SDP0; + mask |= IXGBE_EIMS_GPI_SDP0(hw); break; case ixgbe_mac_X540: case ixgbe_mac_X550: @@ -2563,15 +2587,17 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, break; } if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) - mask |= IXGBE_EIMS_GPI_SDP1; + mask |= IXGBE_EIMS_GPI_SDP1(hw); switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: - mask |= IXGBE_EIMS_GPI_SDP1; - mask |= IXGBE_EIMS_GPI_SDP2; + mask |= IXGBE_EIMS_GPI_SDP1(hw); + mask |= IXGBE_EIMS_GPI_SDP2(hw); /* fall through */ case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t) + mask |= IXGBE_EICR_GPI_SDP0_X540; mask |= IXGBE_EIMS_ECC; mask |= IXGBE_EIMS_MAILBOX; break; @@ -2626,6 +2652,13 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data) case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: + if (hw->phy.type == ixgbe_phy_x550em_ext_t && + (eicr & IXGBE_EICR_GPI_SDP0_X540)) { + adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT; + ixgbe_service_event_schedule(adapter); + IXGBE_WRITE_REG(hw, IXGBE_EICR, + IXGBE_EICR_GPI_SDP0_X540); + } if (eicr & IXGBE_EICR_ECC) { e_info(link, "Received ECC Err, initiating reset\n"); adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED; @@ -4703,32 +4736,6 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter) ixgbe_configure_dfwd(adapter); } -static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw) -{ - switch (hw->phy.type) { - case ixgbe_phy_sfp_avago: - case ixgbe_phy_sfp_ftl: - case ixgbe_phy_sfp_intel: - case ixgbe_phy_sfp_unknown: - case ixgbe_phy_sfp_passive_tyco: - 
case ixgbe_phy_sfp_passive_unknown: - case ixgbe_phy_sfp_active_unknown: - case ixgbe_phy_sfp_ftl_active: - case ixgbe_phy_qsfp_passive_unknown: - case ixgbe_phy_qsfp_active_unknown: - case ixgbe_phy_qsfp_intel: - case ixgbe_phy_qsfp_unknown: - /* ixgbe_phy_none is set when no SFP module is present */ - case ixgbe_phy_none: - return true; - case ixgbe_phy_nl: - if (hw->mac.type == ixgbe_mac_82598EB) - return true; - default: - return false; - } -} - /** * ixgbe_sfp_link_config - set up SFP+ link * @adapter: pointer to private adapter struct @@ -4833,7 +4840,7 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter) if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) { switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: - gpie |= IXGBE_SDP0_GPIEN; + gpie |= IXGBE_SDP0_GPIEN_8259X; break; case ixgbe_mac_X540: gpie |= IXGBE_EIMS_TS; @@ -4845,11 +4852,11 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter) /* Enable fan failure interrupt */ if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) - gpie |= IXGBE_SDP1_GPIEN; + gpie |= IXGBE_SDP1_GPIEN(hw); if (hw->mac.type == ixgbe_mac_82599EB) { - gpie |= IXGBE_SDP1_GPIEN; - gpie |= IXGBE_SDP2_GPIEN; + gpie |= IXGBE_SDP1_GPIEN_8259X; + gpie |= IXGBE_SDP2_GPIEN_8259X; } IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); @@ -4873,6 +4880,9 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter) if (hw->mac.ops.enable_tx_laser) hw->mac.ops.enable_tx_laser(hw); + if (hw->phy.ops.set_phy_power) + hw->phy.ops.set_phy_power(hw, true); + smp_mb__before_atomic(); clear_bit(__IXGBE_DOWN, &adapter->state); ixgbe_napi_enable_all(adapter); @@ -4992,6 +5002,13 @@ void ixgbe_reset(struct ixgbe_adapter *adapter) if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) ixgbe_ptp_reset(adapter); + + if (hw->phy.ops.set_phy_power) { + if (!netif_running(adapter->netdev) && !adapter->wol) + hw->phy.ops.set_phy_power(hw, false); + else + hw->phy.ops.set_phy_power(hw, true); + } } /** @@ -5260,7 +5277,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; break; case ixgbe_mac_X540: - fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM); + fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM(hw)); if (fwsm & IXGBE_FWSM_TS_ENABLED) adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; break; @@ -5672,6 +5689,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) static int ixgbe_open(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; int err, queues; /* disallow open during test */ @@ -5729,6 +5747,8 @@ err_set_queues: ixgbe_free_irq(adapter); err_req_irq: ixgbe_free_all_rx_resources(adapter); + if (hw->phy.ops.set_phy_power && !adapter->wol) + hw->phy.ops.set_phy_power(&adapter->hw, false); err_setup_rx: ixgbe_free_all_tx_resources(adapter); err_setup_tx: @@ -5889,6 +5909,8 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) } *enable_wake = !!wufc; + if (hw->phy.ops.set_phy_power && !*enable_wake) + hw->phy.ops.set_phy_power(hw, false); ixgbe_release_hw_control(adapter); @@ -6718,6 +6740,26 @@ static void ixgbe_service_timer(unsigned long data) ixgbe_service_event_schedule(adapter); } +static void ixgbe_phy_interrupt_subtask(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 status; + + if (!(adapter->flags2 & IXGBE_FLAG2_PHY_INTERRUPT)) + return; + + adapter->flags2 &= ~IXGBE_FLAG2_PHY_INTERRUPT; + + if (!hw->phy.ops.handle_lasi) + return; + + status = hw->phy.ops.handle_lasi(&adapter->hw); + if 
(status != IXGBE_ERR_OVERTEMP) + return; + + e_crit(drv, "%s\n", ixgbe_overheat_msg); +} + static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter) { if (!(adapter->flags2 & IXGBE_FLAG2_RESET_REQUESTED)) @@ -6759,6 +6801,7 @@ static void ixgbe_service_task(struct work_struct *work) return; } ixgbe_reset_subtask(adapter); + ixgbe_phy_interrupt_subtask(adapter); ixgbe_sfp_detection_subtask(adapter); ixgbe_sfp_link_config_subtask(adapter); ixgbe_check_overtemp_subtask(adapter); @@ -8291,6 +8334,10 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, break; case IXGBE_DEV_ID_X540T: case IXGBE_DEV_ID_X540T1: + case IXGBE_DEV_ID_X550T: + case IXGBE_DEV_ID_X550EM_X_KX4: + case IXGBE_DEV_ID_X550EM_X_KR: + case IXGBE_DEV_ID_X550EM_X_10G_T: /* check eeprom to see if enabled wol */ if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) || ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) && @@ -8431,10 +8478,11 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* Setup hw api */ memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); hw->mac.type = ii->mac; + hw->mvals = ii->mvals; /* EEPROM */ memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops)); - eec = IXGBE_READ_REG(hw, IXGBE_EEC); + eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); if (ixgbe_removed(hw->hw_addr)) { err = -EIO; goto err_ioremap; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index af828f89419f..526a20bf7488 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -347,6 +347,7 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) case TN1010_PHY_ID: phy_type = ixgbe_phy_tn; break; + case X550_PHY_ID: case X540_PHY_ID: phy_type = ixgbe_phy_aq; break; @@ -356,6 +357,9 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) case ATH_PHY_ID: phy_type = ixgbe_phy_nl; break; + case X557_PHY_ID: + phy_type = ixgbe_phy_x550em_ext_t; + break; default: phy_type = ixgbe_phy_unknown; break; @@ -1348,6 +1352,9 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) return IXGBE_ERR_SFP_NOT_PRESENT; } + /* LAN ID is needed for sfp_type determination */ + hw->mac.ops.set_lan_id(hw); + status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER, &identifier); @@ -1361,9 +1368,6 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) hw->phy.id = identifier; - /* LAN ID is needed for sfp_type determination */ - hw->mac.ops.set_lan_id(hw); - status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_10GBE_COMP, &comp_codes_10g); @@ -1793,7 +1797,7 @@ fail: **/ static void ixgbe_i2c_start(struct ixgbe_hw *hw) { - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); /* Start condition must begin with data and clock high */ ixgbe_set_i2c_data(hw, &i2cctl, 1); @@ -1822,7 +1826,7 @@ static void ixgbe_i2c_start(struct ixgbe_hw *hw) **/ static void ixgbe_i2c_stop(struct ixgbe_hw *hw) { - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); /* Stop condition must begin with data low and clock high */ ixgbe_set_i2c_data(hw, &i2cctl, 0); @@ -1880,9 +1884,9 @@ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data) } /* Release SDA line (set high) */ - i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); - i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw); - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl); + i2cctl = 
IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); + i2cctl |= IXGBE_I2C_DATA_OUT(hw); + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), i2cctl); IXGBE_WRITE_FLUSH(hw); return status; @@ -1898,7 +1902,7 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw) { s32 status = 0; u32 i = 0; - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); u32 timeout = 10; bool ack = true; @@ -1911,7 +1915,7 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw) /* Poll for ACK. Note that ACK in I2C spec is * transition from 1 to 0 */ for (i = 0; i < timeout; i++) { - i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); ack = ixgbe_get_i2c_data(hw, &i2cctl); udelay(1); @@ -1941,14 +1945,14 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw) **/ static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data) { - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); ixgbe_raise_i2c_clk(hw, &i2cctl); /* Minimum high period of clock is 4us */ udelay(IXGBE_I2C_T_HIGH); - i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); *data = ixgbe_get_i2c_data(hw, &i2cctl); ixgbe_lower_i2c_clk(hw, &i2cctl); @@ -1969,7 +1973,7 @@ static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data) static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data) { s32 status; - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); status = ixgbe_set_i2c_data(hw, &i2cctl, data); if (status == 0) { @@ -2005,14 +2009,14 @@ static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) u32 i2cctl_r = 0; for (i = 0; i < timeout; i++) { - *i2cctl |= IXGBE_I2C_CLK_OUT_BY_MAC(hw); - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); + *i2cctl |= IXGBE_I2C_CLK_OUT(hw); + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl); IXGBE_WRITE_FLUSH(hw); /* SCL rise time (1000ns) */ udelay(IXGBE_I2C_T_RISE); - i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); - if (i2cctl_r & IXGBE_I2C_CLK_IN_BY_MAC(hw)) + i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); + if (i2cctl_r & IXGBE_I2C_CLK_IN(hw)) break; } } @@ -2027,9 +2031,9 @@ static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) { - *i2cctl &= ~IXGBE_I2C_CLK_OUT_BY_MAC(hw); + *i2cctl &= ~IXGBE_I2C_CLK_OUT(hw); - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl); IXGBE_WRITE_FLUSH(hw); /* SCL fall time (300ns) */ @@ -2047,18 +2051,18 @@ static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data) { if (data) - *i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw); + *i2cctl |= IXGBE_I2C_DATA_OUT(hw); else - *i2cctl &= ~IXGBE_I2C_DATA_OUT_BY_MAC(hw); + *i2cctl &= ~IXGBE_I2C_DATA_OUT(hw); - IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); + IXGBE_WRITE_REG(hw, IXGBE_I2CCTL(hw), *i2cctl); IXGBE_WRITE_FLUSH(hw); /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */ udelay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA); /* Verify data was set correctly */ - *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); if (data != ixgbe_get_i2c_data(hw, i2cctl)) { hw_dbg(hw, "Error - I2C data was not set to %X.\n", data); return IXGBE_ERR_I2C; @@ -2076,7 +2080,7 @@ static s32 
ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data) **/ static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl) { - if (*i2cctl & IXGBE_I2C_DATA_IN_BY_MAC(hw)) + if (*i2cctl & IXGBE_I2C_DATA_IN(hw)) return true; return false; } @@ -2090,7 +2094,7 @@ static bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl) **/ static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw) { - u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL(hw)); u32 i; ixgbe_i2c_start(hw); @@ -2137,3 +2141,36 @@ s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw) return IXGBE_ERR_OVERTEMP; } + +/** ixgbe_set_copper_phy_power - Control power for copper phy + * @hw: pointer to hardware structure + * @on: true for on, false for off + **/ +s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on) +{ + u32 status; + u16 reg; + + /* Bail if we don't have copper phy */ + if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper) + return 0; + + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + ®); + if (status) + return status; + + if (on) { + reg &= ~IXGBE_MDIO_PHY_SET_LOW_POWER_MODE; + } else { + if (ixgbe_check_reset_blocked(hw)) + return 0; + reg |= IXGBE_MDIO_PHY_SET_LOW_POWER_MODE; + } + + status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + reg); + return status; +} diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h index 434643881287..e45988c4dad5 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h @@ -145,6 +145,7 @@ s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, u16 *firmware_version); s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw); +s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on); s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw); s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw); s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index dd6ba5916dfe..b6f424f3b1a8 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -91,14 +91,24 @@ #define IXGBE_DEV_ID_X550_VF 0x1565 #define IXGBE_DEV_ID_X550EM_X_VF 0x15A8 +#define IXGBE_CAT(r, m) IXGBE_##r##_##m + +#define IXGBE_BY_MAC(_hw, r) ((_hw)->mvals[IXGBE_CAT(r, IDX)]) + /* General Registers */ #define IXGBE_CTRL 0x00000 #define IXGBE_STATUS 0x00008 #define IXGBE_CTRL_EXT 0x00018 #define IXGBE_ESDP 0x00020 #define IXGBE_EODSDP 0x00028 -#define IXGBE_I2CCTL_BY_MAC(_hw)((((_hw)->mac.type >= ixgbe_mac_X550) ? 
\ - 0x15F5C : 0x00028)) + +#define IXGBE_I2CCTL_8259X 0x00028 +#define IXGBE_I2CCTL_X540 IXGBE_I2CCTL_8259X +#define IXGBE_I2CCTL_X550 0x15F5C +#define IXGBE_I2CCTL_X550EM_x IXGBE_I2CCTL_X550 +#define IXGBE_I2CCTL_X550EM_a IXGBE_I2CCTL_X550 +#define IXGBE_I2CCTL(_hw) IXGBE_BY_MAC((_hw), I2CCTL) + #define IXGBE_LEDCTL 0x00200 #define IXGBE_FRTIMER 0x00048 #define IXGBE_TCPTIMER 0x0004C @@ -106,17 +116,39 @@ #define IXGBE_EXVET 0x05078 /* NVM Registers */ -#define IXGBE_EEC 0x10010 +#define IXGBE_EEC_8259X 0x10010 +#define IXGBE_EEC_X540 IXGBE_EEC_8259X +#define IXGBE_EEC_X550 IXGBE_EEC_8259X +#define IXGBE_EEC_X550EM_x IXGBE_EEC_8259X +#define IXGBE_EEC_X550EM_a 0x15FF8 +#define IXGBE_EEC(_hw) IXGBE_BY_MAC((_hw), EEC) #define IXGBE_EERD 0x10014 #define IXGBE_EEWR 0x10018 -#define IXGBE_FLA 0x1001C +#define IXGBE_FLA_8259X 0x1001C +#define IXGBE_FLA_X540 IXGBE_FLA_8259X +#define IXGBE_FLA_X550 IXGBE_FLA_8259X +#define IXGBE_FLA_X550EM_x IXGBE_FLA_8259X +#define IXGBE_FLA_X550EM_a 0x15F6C +#define IXGBE_FLA(_hw) IXGBE_BY_MAC((_hw), FLA) #define IXGBE_EEMNGCTL 0x10110 #define IXGBE_EEMNGDATA 0x10114 #define IXGBE_FLMNGCTL 0x10118 #define IXGBE_FLMNGDATA 0x1011C #define IXGBE_FLMNGCNT 0x10120 #define IXGBE_FLOP 0x1013C -#define IXGBE_GRC 0x10200 +#define IXGBE_GRC_8259X 0x10200 +#define IXGBE_GRC_X540 IXGBE_GRC_8259X +#define IXGBE_GRC_X550 IXGBE_GRC_8259X +#define IXGBE_GRC_X550EM_x IXGBE_GRC_8259X +#define IXGBE_GRC_X550EM_a 0x15F64 +#define IXGBE_GRC(_hw) IXGBE_BY_MAC((_hw), GRC) + +#define IXGBE_SRAMREL_8259X 0x10210 +#define IXGBE_SRAMREL_X540 IXGBE_SRAMREL_8259X +#define IXGBE_SRAMREL_X550 IXGBE_SRAMREL_8259X +#define IXGBE_SRAMREL_X550EM_x IXGBE_SRAMREL_8259X +#define IXGBE_SRAMREL_X550EM_a 0x15F6C +#define IXGBE_SRAMREL(_hw) IXGBE_BY_MAC((_hw), SRAMREL) /* General Receive Control */ #define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */ @@ -126,14 +158,55 @@ #define IXGBE_VPDDIAG1 0x10208 /* I2CCTL Bit Masks */ -#define IXGBE_I2C_CLK_IN_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \ - 0x00004000 : 0x00000001) -#define IXGBE_I2C_CLK_OUT_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \ - 0x00000200 : 0x00000002) -#define IXGBE_I2C_DATA_IN_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? \ - 0x00001000 : 0x00000004) -#define IXGBE_I2C_DATA_OUT_BY_MAC(_hw)(((_hw)->mac.type) >= ixgbe_mac_X550 ? 
\ - 0x00000400 : 0x00000008) +#define IXGBE_I2C_CLK_IN_8259X 0x00000001 +#define IXGBE_I2C_CLK_IN_X540 IXGBE_I2C_CLK_IN_8259X +#define IXGBE_I2C_CLK_IN_X550 0x00004000 +#define IXGBE_I2C_CLK_IN_X550EM_x IXGBE_I2C_CLK_IN_X550 +#define IXGBE_I2C_CLK_IN_X550EM_a IXGBE_I2C_CLK_IN_X550 +#define IXGBE_I2C_CLK_IN(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_IN) + +#define IXGBE_I2C_CLK_OUT_8259X 0x00000002 +#define IXGBE_I2C_CLK_OUT_X540 IXGBE_I2C_CLK_OUT_8259X +#define IXGBE_I2C_CLK_OUT_X550 0x00000200 +#define IXGBE_I2C_CLK_OUT_X550EM_x IXGBE_I2C_CLK_OUT_X550 +#define IXGBE_I2C_CLK_OUT_X550EM_a IXGBE_I2C_CLK_OUT_X550 +#define IXGBE_I2C_CLK_OUT(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_OUT) + +#define IXGBE_I2C_DATA_IN_8259X 0x00000004 +#define IXGBE_I2C_DATA_IN_X540 IXGBE_I2C_DATA_IN_8259X +#define IXGBE_I2C_DATA_IN_X550 0x00001000 +#define IXGBE_I2C_DATA_IN_X550EM_x IXGBE_I2C_DATA_IN_X550 +#define IXGBE_I2C_DATA_IN_X550EM_a IXGBE_I2C_DATA_IN_X550 +#define IXGBE_I2C_DATA_IN(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_IN) + +#define IXGBE_I2C_DATA_OUT_8259X 0x00000008 +#define IXGBE_I2C_DATA_OUT_X540 IXGBE_I2C_DATA_OUT_8259X +#define IXGBE_I2C_DATA_OUT_X550 0x00000400 +#define IXGBE_I2C_DATA_OUT_X550EM_x IXGBE_I2C_DATA_OUT_X550 +#define IXGBE_I2C_DATA_OUT_X550EM_a IXGBE_I2C_DATA_OUT_X550 +#define IXGBE_I2C_DATA_OUT(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_OUT) + +#define IXGBE_I2C_DATA_OE_N_EN_8259X 0 +#define IXGBE_I2C_DATA_OE_N_EN_X540 IXGBE_I2C_DATA_OE_N_EN_8259X +#define IXGBE_I2C_DATA_OE_N_EN_X550 0x00000800 +#define IXGBE_I2C_DATA_OE_N_EN_X550EM_x IXGBE_I2C_DATA_OE_N_EN_X550 +#define IXGBE_I2C_DATA_OE_N_EN_X550EM_a IXGBE_I2C_DATA_OE_N_EN_X550 +#define IXGBE_I2C_DATA_OE_N_EN(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_OE_N_EN) + +#define IXGBE_I2C_BB_EN_8259X 0 +#define IXGBE_I2C_BB_EN_X540 IXGBE_I2C_BB_EN_8259X +#define IXGBE_I2C_BB_EN_X550 0x00000100 +#define IXGBE_I2C_BB_EN_X550EM_x IXGBE_I2C_BB_EN_X550 +#define IXGBE_I2C_BB_EN_X550EM_a IXGBE_I2C_BB_EN_X550 +#define IXGBE_I2C_BB_EN(_hw) IXGBE_BY_MAC((_hw), I2C_BB_EN) + +#define IXGBE_I2C_CLK_OE_N_EN_8259X 0 +#define IXGBE_I2C_CLK_OE_N_EN_X540 IXGBE_I2C_CLK_OE_N_EN_8259X +#define IXGBE_I2C_CLK_OE_N_EN_X550 0x00002000 +#define IXGBE_I2C_CLK_OE_N_EN_X550EM_x IXGBE_I2C_CLK_OE_N_EN_X550 +#define IXGBE_I2C_CLK_OE_N_EN_X550EM_a IXGBE_I2C_CLK_OE_N_EN_X550 +#define IXGBE_I2C_CLK_OE_N_EN(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_OE_N_EN) + #define IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT 500 #define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8 @@ -835,15 +908,36 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_GSCN_1 0x11024 #define IXGBE_GSCN_2 0x11028 #define IXGBE_GSCN_3 0x1102C -#define IXGBE_FACTPS 0x10150 +#define IXGBE_FACTPS_8259X 0x10150 +#define IXGBE_FACTPS_X540 IXGBE_FACTPS_8259X +#define IXGBE_FACTPS_X550 IXGBE_FACTPS_8259X +#define IXGBE_FACTPS_X550EM_x IXGBE_FACTPS_8259X +#define IXGBE_FACTPS_X550EM_a 0x15FEC +#define IXGBE_FACTPS(_hw) IXGBE_BY_MAC((_hw), FACTPS) + #define IXGBE_PCIEANACTL 0x11040 -#define IXGBE_SWSM 0x10140 -#define IXGBE_FWSM 0x10148 +#define IXGBE_SWSM_8259X 0x10140 +#define IXGBE_SWSM_X540 IXGBE_SWSM_8259X +#define IXGBE_SWSM_X550 IXGBE_SWSM_8259X +#define IXGBE_SWSM_X550EM_x IXGBE_SWSM_8259X +#define IXGBE_SWSM_X550EM_a 0x15F70 +#define IXGBE_SWSM(_hw) IXGBE_BY_MAC((_hw), SWSM) +#define IXGBE_FWSM_8259X 0x10148 +#define IXGBE_FWSM_X540 IXGBE_FWSM_8259X +#define IXGBE_FWSM_X550 IXGBE_FWSM_8259X +#define IXGBE_FWSM_X550EM_x IXGBE_FWSM_8259X +#define IXGBE_FWSM_X550EM_a 0x15F74 +#define IXGBE_FWSM(_hw) IXGBE_BY_MAC((_hw), FWSM) #define IXGBE_GSSR 0x10160 #define IXGBE_MREVID 0x11064 
#define IXGBE_DCA_ID 0x11070 #define IXGBE_DCA_CTRL 0x11074 -#define IXGBE_SWFW_SYNC IXGBE_GSSR +#define IXGBE_SWFW_SYNC_8259X IXGBE_GSSR +#define IXGBE_SWFW_SYNC_X540 IXGBE_SWFW_SYNC_8259X +#define IXGBE_SWFW_SYNC_X550 IXGBE_SWFW_SYNC_8259X +#define IXGBE_SWFW_SYNC_X550EM_x IXGBE_SWFW_SYNC_8259X +#define IXGBE_SWFW_SYNC_X550EM_a 0x15F78 +#define IXGBE_SWFW_SYNC(_hw) IXGBE_BY_MAC((_hw), SWFW_SYNC) /* PCIe registers 82599-specific */ #define IXGBE_GCR_EXT 0x11050 @@ -855,14 +949,21 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_PHYDAT_82599 0x11044 #define IXGBE_PHYCTL_82599 0x11048 #define IXGBE_PBACLR_82599 0x11068 -#define IXGBE_CIAA_82599 0x11088 -#define IXGBE_CIAD_82599 0x1108C -#define IXGBE_CIAA_X550 0x11508 -#define IXGBE_CIAD_X550 0x11510 -#define IXGBE_CIAA_BY_MAC(_hw) ((((_hw)->mac.type >= ixgbe_mac_X550) ? \ - IXGBE_CIAA_X550 : IXGBE_CIAA_82599)) -#define IXGBE_CIAD_BY_MAC(_hw) ((((_hw)->mac.type >= ixgbe_mac_X550) ? \ - IXGBE_CIAD_X550 : IXGBE_CIAD_82599)) + +#define IXGBE_CIAA_8259X 0x11088 +#define IXGBE_CIAA_X540 IXGBE_CIAA_8259X +#define IXGBE_CIAA_X550 0x11508 +#define IXGBE_CIAA_X550EM_x IXGBE_CIAA_X550 +#define IXGBE_CIAA_X550EM_a IXGBE_CIAA_X550 +#define IXGBE_CIAA(_hw) IXGBE_BY_MAC((_hw), CIAA) + +#define IXGBE_CIAD_8259X 0x1108C +#define IXGBE_CIAD_X540 IXGBE_CIAD_8259X +#define IXGBE_CIAD_X550 0x11510 +#define IXGBE_CIAD_X550EM_x IXGBE_CIAD_X550 +#define IXGBE_CIAD_X550EM_a IXGBE_CIAD_X550 +#define IXGBE_CIAD(_hw) IXGBE_BY_MAC((_hw), CIAD) + #define IXGBE_PICAUSE 0x110B0 #define IXGBE_PIENA 0x110B8 #define IXGBE_CDQ_MBR_82599 0x110B4 @@ -1204,18 +1305,37 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_MDIO_AUTO_NEG_CONTROL 0x0 /* AUTO_NEG Control Reg */ #define IXGBE_MDIO_AUTO_NEG_STATUS 0x1 /* AUTO_NEG Status Reg */ #define IXGBE_MDIO_AUTO_NEG_VENDOR_STAT 0xC800 /* AUTO_NEG Vendor Status Reg */ +#define IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2 0xCC01 /* AUTO_NEG Vendor Tx Reg */ +#define IXGBE_MDIO_AUTO_NEG_VEN_LSC 0x1 /* AUTO_NEG Vendor Tx LSC */ #define IXGBE_MDIO_AUTO_NEG_ADVT 0x10 /* AUTO_NEG Advt Reg */ #define IXGBE_MDIO_AUTO_NEG_LP 0x13 /* AUTO_NEG LP Status Reg */ #define IXGBE_MDIO_AUTO_NEG_EEE_ADVT 0x3C /* AUTO_NEG EEE Advt Reg */ +#define IXGBE_MDIO_PHY_SET_LOW_POWER_MODE 0x0800 /* Set low power mode */ + #define IXGBE_MDIO_TX_VENDOR_ALARMS_3 0xCC02 /* Vendor Alarms 3 Reg */ #define IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK 0x3 /* PHY Reset Complete Mask */ #define IXGBE_MDIO_GLOBAL_RES_PR_10 0xC479 /* Global Resv Provisioning 10 Reg */ #define IXGBE_MDIO_POWER_UP_STALL 0x8000 /* Power Up Stall */ +#define IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK 0xFF00 /* int std mask */ +#define IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG 0xFC00 /* chip std int flag */ +#define IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK 0xFF01 /* int chip-wide mask */ +#define IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG 0xFC01 /* int chip-wide mask */ +#define IXGBE_MDIO_GLOBAL_ALARM_1 0xCC00 /* Global alarm 1 */ +#define IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL 0x4000 /* high temp failure */ +#define IXGBE_MDIO_GLOBAL_INT_MASK 0xD400 /* Global int mask */ +/* autoneg vendor alarm int enable */ +#define IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN 0x1000 +#define IXGBE_MDIO_GLOBAL_ALARM_1_INT 0x4 /* int in Global alarm 1 */ +#define IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN 0x1 /* vendor alarm int enable */ +#define IXGBE_MDIO_GLOBAL_STD_ALM2_INT 0x200 /* vendor alarm2 int mask */ +#define IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN 0x4000 /* int high temp enable */ #define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */ 
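/* [Editor's note, illustrative sketch with hypothetical names] The MDIO
 * global-alarm and interrupt-mask defines above back the new X550EM_x PHY
 * interrupt path seen earlier in ixgbe_main.c: the ISR only latches
 * IXGBE_FLAG2_PHY_INTERRUPT and schedules the service task, and
 * ixgbe_phy_interrupt_subtask() later calls hw->phy.ops.handle_lasi() from
 * process context, where slow MDIO access is safe. The deferral pattern,
 * reduced (the error value is a stand-in, not the driver's constant):
 */
#include <stdio.h>

#define FLAG_PHY_INTERRUPT 0x1	/* stands in for IXGBE_FLAG2_PHY_INTERRUPT */
#define ERR_OVERTEMP (-26)	/* stands in for IXGBE_ERR_OVERTEMP */

struct dev {
	unsigned int flags;
	int (*handle_lasi)(struct dev *d);	/* queries the PHY over MDIO */
};

static void isr(struct dev *d)
{
	d->flags |= FLAG_PHY_INTERRUPT;	/* cheap: just latch and defer */
}

static void service_task(struct dev *d)
{
	if (!(d->flags & FLAG_PHY_INTERRUPT))
		return;
	d->flags &= ~FLAG_PHY_INTERRUPT;
	if (d->handle_lasi && d->handle_lasi(d) == ERR_OVERTEMP)
		printf("adapter overheated\n");	/* e_crit() in the driver */
}

static int fake_lasi(struct dev *d) { return ERR_OVERTEMP; }

int main(void)
{
	struct dev d = { 0, fake_lasi };

	isr(&d);
	service_task(&d);
	return 0;
}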
#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */ #define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Stat Reg */ +#define IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK 0xD401 /* PHY TX Vendor LASI */ +#define IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN 0x1 /* PHY TX Vendor LASI enable */ #define IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR 0x9 /* Standard Tx Dis Reg */ #define IXGBE_MDIO_PMD_GLOBAL_TX_DISABLE 0x0001 /* PMD Global Tx Dis */ @@ -1233,6 +1353,8 @@ struct ixgbe_thermal_sensor_data { #define TN1010_PHY_ID 0x00A19410 #define TNX_FW_REV 0xB #define X540_PHY_ID 0x01540200 +#define X550_PHY_ID 0x01540220 +#define X557_PHY_ID 0x01540240 #define QT2022_PHY_ID 0x0043A400 #define ATH_PHY_ID 0x03429050 #define AQ_FW_REV 0x20 @@ -1253,9 +1375,25 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_CONTROL_SOL_NL 0x0000 /* General purpose Interrupt Enable */ -#define IXGBE_SDP0_GPIEN 0x00000001 /* SDP0 */ -#define IXGBE_SDP1_GPIEN 0x00000002 /* SDP1 */ -#define IXGBE_SDP2_GPIEN 0x00000004 /* SDP2 */ +#define IXGBE_SDP0_GPIEN_8259X 0x00000001 /* SDP0 */ +#define IXGBE_SDP1_GPIEN_8259X 0x00000002 /* SDP1 */ +#define IXGBE_SDP2_GPIEN_8259X 0x00000004 /* SDP2 */ +#define IXGBE_SDP0_GPIEN_X540 0x00000002 /* SDP0 on X540 and X550 */ +#define IXGBE_SDP1_GPIEN_X540 0x00000004 /* SDP1 on X540 and X550 */ +#define IXGBE_SDP2_GPIEN_X540 0x00000008 /* SDP2 on X540 and X550 */ +#define IXGBE_SDP0_GPIEN_X550 IXGBE_SDP0_GPIEN_X540 +#define IXGBE_SDP1_GPIEN_X550 IXGBE_SDP1_GPIEN_X540 +#define IXGBE_SDP2_GPIEN_X550 IXGBE_SDP2_GPIEN_X540 +#define IXGBE_SDP0_GPIEN_X550EM_x IXGBE_SDP0_GPIEN_X540 +#define IXGBE_SDP1_GPIEN_X550EM_x IXGBE_SDP1_GPIEN_X540 +#define IXGBE_SDP2_GPIEN_X550EM_x IXGBE_SDP2_GPIEN_X540 +#define IXGBE_SDP0_GPIEN_X550EM_a IXGBE_SDP0_GPIEN_X540 +#define IXGBE_SDP1_GPIEN_X550EM_a IXGBE_SDP1_GPIEN_X540 +#define IXGBE_SDP2_GPIEN_X550EM_a IXGBE_SDP2_GPIEN_X540 +#define IXGBE_SDP0_GPIEN(_hw) IXGBE_BY_MAC((_hw), SDP0_GPIEN) +#define IXGBE_SDP1_GPIEN(_hw) IXGBE_BY_MAC((_hw), SDP1_GPIEN) +#define IXGBE_SDP2_GPIEN(_hw) IXGBE_BY_MAC((_hw), SDP2_GPIEN) + #define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */ #define IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */ #define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */ @@ -1417,9 +1555,25 @@ enum { #define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */ #define IXGBE_EICR_TS 0x00800000 /* Thermal Sensor Event */ #define IXGBE_EICR_TIMESYNC 0x01000000 /* Timesync Event */ -#define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */ -#define IXGBE_EICR_GPI_SDP1 0x02000000 /* Gen Purpose Interrupt on SDP1 */ -#define IXGBE_EICR_GPI_SDP2 0x04000000 /* Gen Purpose Interrupt on SDP2 */ +#define IXGBE_EICR_GPI_SDP0_8259X 0x01000000 /* Gen Purpose INT on SDP0 */ +#define IXGBE_EICR_GPI_SDP1_8259X 0x02000000 /* Gen Purpose INT on SDP1 */ +#define IXGBE_EICR_GPI_SDP2_8259X 0x04000000 /* Gen Purpose INT on SDP2 */ +#define IXGBE_EICR_GPI_SDP0_X540 0x02000000 +#define IXGBE_EICR_GPI_SDP1_X540 0x04000000 +#define IXGBE_EICR_GPI_SDP2_X540 0x08000000 +#define IXGBE_EICR_GPI_SDP0_X550 IXGBE_EICR_GPI_SDP0_X540 +#define IXGBE_EICR_GPI_SDP1_X550 IXGBE_EICR_GPI_SDP1_X540 +#define IXGBE_EICR_GPI_SDP2_X550 IXGBE_EICR_GPI_SDP2_X540 +#define IXGBE_EICR_GPI_SDP0_X550EM_x IXGBE_EICR_GPI_SDP0_X540 +#define IXGBE_EICR_GPI_SDP1_X550EM_x IXGBE_EICR_GPI_SDP1_X540 +#define IXGBE_EICR_GPI_SDP2_X550EM_x IXGBE_EICR_GPI_SDP2_X540 +#define IXGBE_EICR_GPI_SDP0_X550EM_a IXGBE_EICR_GPI_SDP0_X540 +#define IXGBE_EICR_GPI_SDP1_X550EM_a 
IXGBE_EICR_GPI_SDP1_X540 +#define IXGBE_EICR_GPI_SDP2_X550EM_a IXGBE_EICR_GPI_SDP2_X540 +#define IXGBE_EICR_GPI_SDP0(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP0) +#define IXGBE_EICR_GPI_SDP1(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP1) +#define IXGBE_EICR_GPI_SDP2(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP2) + #define IXGBE_EICR_ECC 0x10000000 /* ECC Error */ #define IXGBE_EICR_PBUR 0x10000000 /* Packet Buffer Handler Error */ #define IXGBE_EICR_DHER 0x20000000 /* Descriptor Handler Error */ @@ -1435,9 +1589,9 @@ enum { #define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */ #define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ #define IXGBE_EICS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ -#define IXGBE_EICS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ -#define IXGBE_EICS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ -#define IXGBE_EICS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ +#define IXGBE_EICS_GPI_SDP0(_hw) IXGBE_EICR_GPI_SDP0(_hw) +#define IXGBE_EICS_GPI_SDP1(_hw) IXGBE_EICR_GPI_SDP1(_hw) +#define IXGBE_EICS_GPI_SDP2(_hw) IXGBE_EICR_GPI_SDP2(_hw) #define IXGBE_EICS_ECC IXGBE_EICR_ECC /* ECC Error */ #define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ #define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */ @@ -1454,9 +1608,9 @@ enum { #define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ #define IXGBE_EIMS_TS IXGBE_EICR_TS /* Thermal Sensor Event */ #define IXGBE_EIMS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ -#define IXGBE_EIMS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ -#define IXGBE_EIMS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ -#define IXGBE_EIMS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ +#define IXGBE_EIMS_GPI_SDP0(_hw) IXGBE_EICR_GPI_SDP0(_hw) +#define IXGBE_EIMS_GPI_SDP1(_hw) IXGBE_EICR_GPI_SDP1(_hw) +#define IXGBE_EIMS_GPI_SDP2(_hw) IXGBE_EICR_GPI_SDP2(_hw) #define IXGBE_EIMS_ECC IXGBE_EICR_ECC /* ECC Error */ #define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ #define IXGBE_EIMS_DHER IXGBE_EICR_DHER /* Descr Handler Error */ @@ -1472,9 +1626,9 @@ enum { #define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */ #define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ #define IXGBE_EIMC_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ -#define IXGBE_EIMC_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ -#define IXGBE_EIMC_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ -#define IXGBE_EIMC_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ +#define IXGBE_EIMC_GPI_SDP0(_hw) IXGBE_EICR_GPI_SDP0(_hw) +#define IXGBE_EIMC_GPI_SDP1(_hw) IXGBE_EICR_GPI_SDP1(_hw) +#define IXGBE_EIMC_GPI_SDP2(_hw) IXGBE_EICR_GPI_SDP2(_hw) #define IXGBE_EIMC_ECC IXGBE_EICR_ECC /* ECC Error */ #define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ #define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Err */ @@ -2741,6 +2895,37 @@ union ixgbe_atr_hash_dword { __be32 dword; }; +#define IXGBE_MVALS_INIT(m) \ + IXGBE_CAT(EEC, m), \ + IXGBE_CAT(FLA, m), \ + IXGBE_CAT(GRC, m), \ + IXGBE_CAT(SRAMREL, m), \ + IXGBE_CAT(FACTPS, m), \ + IXGBE_CAT(SWSM, m), \ + IXGBE_CAT(SWFW_SYNC, m), \ + IXGBE_CAT(FWSM, m), \ + IXGBE_CAT(SDP0_GPIEN, m), \ + IXGBE_CAT(SDP1_GPIEN, m), \ + IXGBE_CAT(SDP2_GPIEN, m), \ + IXGBE_CAT(EICR_GPI_SDP0, m), \ + IXGBE_CAT(EICR_GPI_SDP1, m), \ + IXGBE_CAT(EICR_GPI_SDP2, m), \ + IXGBE_CAT(CIAA, m), \ + IXGBE_CAT(CIAD, m), \ + IXGBE_CAT(I2C_CLK_IN, m), \ + IXGBE_CAT(I2C_CLK_OUT, m), \ + IXGBE_CAT(I2C_DATA_IN, m), \
+ IXGBE_CAT(I2C_DATA_OUT, m), \ + IXGBE_CAT(I2C_DATA_OE_N_EN, m), \ + IXGBE_CAT(I2C_BB_EN, m), \ + IXGBE_CAT(I2C_CLK_OE_N_EN, m), \ + IXGBE_CAT(I2CCTL, m) + +enum ixgbe_mvals { + IXGBE_MVALS_INIT(IDX), + IXGBE_MVALS_IDX_LIMIT +}; + enum ixgbe_eeprom_type { ixgbe_eeprom_uninitialized = 0, ixgbe_eeprom_spi, @@ -3112,6 +3297,8 @@ struct ixgbe_phy_operations { s32 (*read_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 reg, u16 *val); s32 (*write_i2c_combined)(struct ixgbe_hw *, u8 addr, u16 reg, u16 val); s32 (*check_overtemp)(struct ixgbe_hw *); + s32 (*set_phy_power)(struct ixgbe_hw *, bool on); + s32 (*handle_lasi)(struct ixgbe_hw *hw); }; struct ixgbe_eeprom_info { @@ -3173,6 +3360,7 @@ struct ixgbe_phy_info { bool multispeed_fiber; bool reset_if_overtemp; bool qsfp_shared_i2c_bus; + u32 nw_mng_if_sel; }; #include "ixgbe_mbx.h" @@ -3216,6 +3404,7 @@ struct ixgbe_hw { struct ixgbe_eeprom_info eeprom; struct ixgbe_bus_info bus; struct ixgbe_mbx_info mbx; + const u32 *mvals; u16 device_id; u16 vendor_id; u16 subsystem_device_id; @@ -3234,6 +3423,7 @@ struct ixgbe_info { struct ixgbe_eeprom_operations *eeprom_ops; struct ixgbe_phy_operations *phy_ops; struct ixgbe_mbx_operations *mbx_ops; + const u32 *mvals; }; @@ -3339,4 +3529,6 @@ struct ixgbe_info { #define IXGBE_SB_IOSF_TARGET_KX4_PCS0 2 #define IXGBE_SB_IOSF_TARGET_KX4_PCS1 3 +#define IXGBE_NW_MNG_IF_SEL 0x00011178 +#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE BIT(24) #endif /* _IXGBE_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c index 0a8b5e42e1a9..032a5870abd1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c @@ -202,7 +202,7 @@ s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw) eeprom->semaphore_delay = 10; eeprom->type = ixgbe_flash; - eec = IXGBE_READ_REG(hw, IXGBE_EEC); + eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> IXGBE_EEC_SIZE_SHIFT); eeprom->word_size = 1 << (eeprom_size + @@ -504,8 +504,8 @@ static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw) return status; } - flup = IXGBE_READ_REG(hw, IXGBE_EEC) | IXGBE_EEC_FLUP; - IXGBE_WRITE_REG(hw, IXGBE_EEC, flup); + flup = IXGBE_READ_REG(hw, IXGBE_EEC(hw)) | IXGBE_EEC_FLUP; + IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), flup); status = ixgbe_poll_flash_update_done_X540(hw); if (status == 0) @@ -514,11 +514,11 @@ static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw) hw_dbg(hw, "Flash update time out\n"); if (hw->revision_id == 0) { - flup = IXGBE_READ_REG(hw, IXGBE_EEC); + flup = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); if (flup & IXGBE_EEC_SEC1VAL) { flup |= IXGBE_EEC_FLUP; - IXGBE_WRITE_REG(hw, IXGBE_EEC, flup); + IXGBE_WRITE_REG(hw, IXGBE_EEC(hw), flup); } status = ixgbe_poll_flash_update_done_X540(hw); @@ -544,7 +544,7 @@ static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw) u32 reg; for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) { - reg = IXGBE_READ_REG(hw, IXGBE_EEC); + reg = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); if (reg & IXGBE_EEC_FLUDONE) return 0; udelay(5); @@ -580,10 +580,10 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask) if (ixgbe_get_swfw_sync_semaphore(hw)) return IXGBE_ERR_SWFW_SYNC; - swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); + swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw)); if (!(swfw_sync & (fwmask | swmask | hwmask))) { swfw_sync |= swmask; - IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync); + IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swfw_sync); 
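The IXGBE_MVALS_INIT() list above is an X-macro: expanded once with m=IDX it names the slots of the new ixgbe_mvals enum, and expanded once per MAC generation (ixgbe_mvals_X540[] above, ixgbe_mvals_X550[]/X550EM_x[] further down) it fills the per-device table that ixgbe_info.mvals and hw->mvals point at. The IXGBE_CAT()/IXGBE_BY_MAC() helpers themselves fall outside the quoted hunks, so the sketch below reconstructs them from the call sites; treat the exact definitions as assumptions rather than patch text.

	/* assumed helpers, consistent with the (_hw) wrappers quoted above */
	#define IXGBE_CAT(r, m)		IXGBE_##r##_##m
	#define IXGBE_BY_MAC(_hw, r)	((_hw)->mvals[IXGBE_CAT(r, IDX)])

	/*
	 * IXGBE_MVALS_INIT(IDX) expands inside the enum to
	 *	IXGBE_EEC_IDX, IXGBE_FLA_IDX, ..., IXGBE_I2CCTL_IDX
	 * and IXGBE_MVALS_INIT(X540) expands inside a table to
	 *	IXGBE_EEC_X540, IXGBE_FLA_X540, ..., IXGBE_I2CCTL_X540
	 * so a converted call site such as
	 *	eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
	 * reads hw->mvals[IXGBE_EEC_IDX], i.e. the EEC offset of whichever
	 * MAC generation attached its table via the .mvals member at probe.
	 */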
ixgbe_release_swfw_sync_semaphore(hw); break; } else { @@ -605,13 +605,13 @@ s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask) * corresponding FW/HW bits in the SW_FW_SYNC register. */ if (i >= timeout) { - swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); + swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw)); if (swfw_sync & (fwmask | hwmask)) { if (ixgbe_get_swfw_sync_semaphore(hw)) return IXGBE_ERR_SWFW_SYNC; swfw_sync |= swmask; - IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync); + IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swfw_sync); ixgbe_release_swfw_sync_semaphore(hw); } } @@ -635,9 +635,9 @@ void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask) ixgbe_get_swfw_sync_semaphore(hw); - swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); + swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw)); swfw_sync &= ~swmask; - IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync); + IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swfw_sync); ixgbe_release_swfw_sync_semaphore(hw); usleep_range(5000, 10000); @@ -660,7 +660,7 @@ static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw) /* If the SMBI bit is 0 when we read it, then the bit will be * set and we have the semaphore */ - swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw)); if (!(swsm & IXGBE_SWSM_SMBI)) break; usleep_range(50, 100); @@ -674,7 +674,7 @@ static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw) /* Now get the semaphore between SW/FW through the REGSMP bit */ for (i = 0; i < timeout; i++) { - swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); + swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw)); if (!(swsm & IXGBE_SWFW_REGSMP)) return 0; @@ -696,13 +696,13 @@ static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw) /* Release both semaphores by writing 0 to the bits REGSMP and SMBI */ - swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); + swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC(hw)); swsm &= ~IXGBE_SWFW_REGSMP; - IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swsm); + IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC(hw), swsm); - swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM(hw)); swsm &= ~IXGBE_SWSM_SMBI; - IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm); + IXGBE_WRITE_REG(hw, IXGBE_SWSM(hw), swsm); IXGBE_WRITE_FLUSH(hw); } @@ -850,9 +850,14 @@ static struct ixgbe_phy_operations phy_ops_X540 = { .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, .check_overtemp = &ixgbe_tn_check_overtemp, + .set_phy_power = &ixgbe_set_copper_phy_power, .get_firmware_version = &ixgbe_get_phy_firmware_version_generic, }; +static const u32 ixgbe_mvals_X540[IXGBE_MVALS_IDX_LIMIT] = { + IXGBE_MVALS_INIT(X540) +}; + struct ixgbe_info ixgbe_X540_info = { .mac = ixgbe_mac_X540, .get_invariants = &ixgbe_get_invariants_X540, @@ -860,4 +865,5 @@ struct ixgbe_info ixgbe_X540_info = { .eeprom_ops = &eeprom_ops_X540, .phy_ops = &phy_ops_X540, .mbx_ops = &mbx_ops_generic, + .mvals = ixgbe_mvals_X540, }; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index b0236985e915..7581da13e92a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -1,7 +1,7 @@ /******************************************************************************* * * Intel 10 Gigabit PCI Express Linux driver - * Copyright(c) 1999 - 2014 Intel Corporation. + * Copyright(c) 1999 - 2015 Intel Corporation. 
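A usage note on the SWFW_SYNC conversion just above: the SMBI/REGSMP semaphore taken in ixgbe_get_swfw_sync_semaphore() only serializes the read-modify-write of the SW_FW_SYNC register itself; the caller-supplied swmask bit is what actually locks the shared resource (flash, PHY, I2C bus) against firmware and the peer port. A typical caller therefore brackets its access roughly as follows (a minimal sketch, not patch text; IXGBE_GSSR_EEP_SM is one of the driver's existing resource masks, and error handling is abbreviated):

	s32 ret;

	/* set our resource bit in SW_FW_SYNC; retries internally */
	ret = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
	if (ret)
		return ret;	/* firmware or the peer port owns the flash */

	/* ... touch the EEPROM/flash registers shared with FW ... */

	/* clear our bit again under the same REGSMP semaphore */
	hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);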
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -26,6 +26,22 @@ #include "ixgbe_common.h" #include "ixgbe_phy.h" +/** ixgbe_setup_mux_ctl - Setup ESDP register for I2C mux control + * @hw: pointer to hardware structure + **/ +static void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw) +{ + u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + + if (hw->bus.lan_id) { + esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1); + esdp |= IXGBE_ESDP_SDP1_DIR; + } + esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR); + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + IXGBE_WRITE_FLUSH(hw); +} + /** ixgbe_identify_phy_x550em - Get PHY type based on device id * @hw: pointer to hardware structure * @@ -33,18 +49,11 @@ */ static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw) { - u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); - switch (hw->device_id) { case IXGBE_DEV_ID_X550EM_X_SFP: /* set up for CS4227 usage */ hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM; - if (hw->bus.lan_id) { - esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1); - esdp |= IXGBE_ESDP_SDP1_DIR; - } - esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR); - IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); + ixgbe_setup_mux_ctl(hw); return ixgbe_identify_module_generic(hw); case IXGBE_DEV_ID_X550EM_X_KX4: @@ -90,7 +99,7 @@ static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw) eeprom->semaphore_delay = 10; eeprom->type = ixgbe_flash; - eec = IXGBE_READ_REG(hw, IXGBE_EEC); + eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw)); eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> IXGBE_EEC_SIZE_SHIFT); eeprom->word_size = 1 << (eeprom_size + @@ -704,111 +713,6 @@ static s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw, return status; } -/** ixgbe_init_mac_link_ops_X550em - init mac link function pointers - * @hw: pointer to hardware structure - **/ -static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) -{ - struct ixgbe_mac_info *mac = &hw->mac; - - /* CS4227 does not support autoneg, so disable the laser control - * functions for SFP+ fiber - */ - if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) { - mac->ops.disable_tx_laser = NULL; - mac->ops.enable_tx_laser = NULL; - mac->ops.flap_tx_laser = NULL; - } -} - -/** ixgbe_setup_sfp_modules_X550em - Setup SFP module - * @hw: pointer to hardware structure - */ -static s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw) -{ - bool setup_linear; - u16 reg_slice, edc_mode; - s32 ret_val; - - switch (hw->phy.sfp_type) { - case ixgbe_sfp_type_unknown: - return 0; - case ixgbe_sfp_type_not_present: - return IXGBE_ERR_SFP_NOT_PRESENT; - case ixgbe_sfp_type_da_cu_core0: - case ixgbe_sfp_type_da_cu_core1: - setup_linear = true; - break; - case ixgbe_sfp_type_srlr_core0: - case ixgbe_sfp_type_srlr_core1: - case ixgbe_sfp_type_da_act_lmt_core0: - case ixgbe_sfp_type_da_act_lmt_core1: - case ixgbe_sfp_type_1g_sx_core0: - case ixgbe_sfp_type_1g_sx_core1: - setup_linear = false; - break; - default: - return IXGBE_ERR_SFP_NOT_SUPPORTED; - } - - ixgbe_init_mac_link_ops_X550em(hw); - hw->phy.ops.reset = NULL; - - /* The CS4227 slice address is the base address + the port-pair reg - * offset. I.e. Slice 0 = 0x12B0 and slice 1 = 0x22B0. - */ - reg_slice = IXGBE_CS4227_SPARE24_LSB + (hw->bus.lan_id << 12); - - if (setup_linear) - edc_mode = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1; - else - edc_mode = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1; - - /* Configure CS4227 for connection type. 
*/ - ret_val = hw->phy.ops.write_i2c_combined(hw, IXGBE_CS4227, reg_slice, - edc_mode); - - if (ret_val) - ret_val = hw->phy.ops.write_i2c_combined(hw, 0x80, reg_slice, - edc_mode); - - return ret_val; -} - -/** ixgbe_get_link_capabilities_x550em - Determines link capabilities - * @hw: pointer to hardware structure - * @speed: pointer to link speed - * @autoneg: true when autoneg or autotry is enabled - **/ -static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, - ixgbe_link_speed *speed, - bool *autoneg) -{ - /* SFP */ - if (hw->phy.media_type == ixgbe_media_type_fiber) { - /* CS4227 SFP must not enable auto-negotiation */ - *autoneg = false; - - if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) { - *speed = IXGBE_LINK_SPEED_1GB_FULL; - return 0; - } - - /* Link capabilities are based on SFP */ - if (hw->phy.multispeed_fiber) - *speed = IXGBE_LINK_SPEED_10GB_FULL | - IXGBE_LINK_SPEED_1GB_FULL; - else - *speed = IXGBE_LINK_SPEED_10GB_FULL; - } else { - *speed = IXGBE_LINK_SPEED_10GB_FULL | - IXGBE_LINK_SPEED_1GB_FULL; - *autoneg = true; - } - return 0; -} - /** ixgbe_write_iosf_sb_reg_x550 - Writes a value to specified register of the * IOSF device * @@ -891,7 +795,7 @@ static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed) } status = ixgbe_write_iosf_sb_reg_x550(hw, - IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); if (status) return status; @@ -973,6 +877,417 @@ static s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed) return status; } +/** + * ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed + * @hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + * + * Setup internal/external PHY link speed based on link speed, then set + * external PHY auto advertised link speed. + * + * Returns error status for any failure + **/ +static s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait) +{ + s32 status; + ixgbe_link_speed force_speed; + + /* Setup internal/external PHY link speed to iXFI (10G), unless + * only 1G is auto advertised then setup KX link. + */ + if (speed & IXGBE_LINK_SPEED_10GB_FULL) + force_speed = IXGBE_LINK_SPEED_10GB_FULL; + else + force_speed = IXGBE_LINK_SPEED_1GB_FULL; + + /* If internal link mode is XFI, then setup XFI internal link. */ + if (!(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) { + status = ixgbe_setup_ixfi_x550em(hw, &force_speed); + + if (status) + return status; + } + + return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait); +} + +/** ixgbe_check_link_t_X550em - Determine link and speed status + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @link_up: true when link is up + * @link_up_wait_to_complete: bool used to wait for link up or not + * + * Check that both the MAC and X557 external PHY have link. 
+ **/ +static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *link_up, + bool link_up_wait_to_complete) +{ + u32 status; + u16 autoneg_status; + + if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper) + return IXGBE_ERR_CONFIG; + + status = ixgbe_check_mac_link_generic(hw, speed, link_up, + link_up_wait_to_complete); + + /* If check link fails or MAC link is not up, then return */ + if (status || !(*link_up)) + return status; + + /* MAC link is up, so check external PHY link. + * Read this twice back to back to indicate current status. + */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_status); + if (status) + return status; + + /* If external PHY link is not up, then indicate link not up */ + if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS)) + *link_up = false; + + return 0; +} + +/** ixgbe_init_mac_link_ops_X550em - init mac link function pointers + * @hw: pointer to hardware structure + **/ +static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) +{ + struct ixgbe_mac_info *mac = &hw->mac; + + switch (mac->ops.get_media_type(hw)) { + case ixgbe_media_type_fiber: + /* CS4227 does not support autoneg, so disable the laser control + * functions for SFP+ fiber + */ + mac->ops.disable_tx_laser = NULL; + mac->ops.enable_tx_laser = NULL; + mac->ops.flap_tx_laser = NULL; + break; + case ixgbe_media_type_copper: + mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em; + mac->ops.check_link = ixgbe_check_link_t_X550em; + break; + default: + break; + } +} + +/** ixgbe_setup_sfp_modules_X550em - Setup SFP module + * @hw: pointer to hardware structure + */ +static s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw) +{ + bool setup_linear; + u16 reg_slice, edc_mode; + s32 ret_val; + + switch (hw->phy.sfp_type) { + case ixgbe_sfp_type_unknown: + return 0; + case ixgbe_sfp_type_not_present: + return IXGBE_ERR_SFP_NOT_PRESENT; + case ixgbe_sfp_type_da_cu_core0: + case ixgbe_sfp_type_da_cu_core1: + setup_linear = true; + break; + case ixgbe_sfp_type_srlr_core0: + case ixgbe_sfp_type_srlr_core1: + case ixgbe_sfp_type_da_act_lmt_core0: + case ixgbe_sfp_type_da_act_lmt_core1: + case ixgbe_sfp_type_1g_sx_core0: + case ixgbe_sfp_type_1g_sx_core1: + setup_linear = false; + break; + default: + return IXGBE_ERR_SFP_NOT_SUPPORTED; + } + + ixgbe_init_mac_link_ops_X550em(hw); + hw->phy.ops.reset = NULL; + + /* The CS4227 slice address is the base address + the port-pair reg + * offset. I.e. Slice 0 = 0x12B0 and slice 1 = 0x22B0. + */ + reg_slice = IXGBE_CS4227_SPARE24_LSB + (hw->bus.lan_id << 12); + + if (setup_linear) + edc_mode = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1; + else + edc_mode = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1; + + /* Configure CS4227 for connection type. 
*/ + ret_val = hw->phy.ops.write_i2c_combined(hw, IXGBE_CS4227, reg_slice, + edc_mode); + + if (ret_val) + ret_val = hw->phy.ops.write_i2c_combined(hw, 0x80, reg_slice, + edc_mode); + + return ret_val; +} + +/** ixgbe_get_link_capabilities_x550em - Determines link capabilities + * @hw: pointer to hardware structure + * @speed: pointer to link speed + * @autoneg: true when autoneg or autotry is enabled + **/ +static s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg) +{ + /* SFP */ + if (hw->phy.media_type == ixgbe_media_type_fiber) { + /* CS4227 SFP must not enable auto-negotiation */ + *autoneg = false; + + if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) { + *speed = IXGBE_LINK_SPEED_1GB_FULL; + return 0; + } + + /* Link capabilities are based on SFP */ + if (hw->phy.multispeed_fiber) + *speed = IXGBE_LINK_SPEED_10GB_FULL | + IXGBE_LINK_SPEED_1GB_FULL; + else + *speed = IXGBE_LINK_SPEED_10GB_FULL; + } else { + *speed = IXGBE_LINK_SPEED_10GB_FULL | + IXGBE_LINK_SPEED_1GB_FULL; + *autoneg = true; + } + return 0; +} + +/** + * ixgbe_get_lasi_ext_t_x550em - Determine external Base T PHY interrupt cause + * @hw: pointer to hardware structure + * @lsc: pointer to boolean flag which indicates whether external Base T + * PHY interrupt is lsc + * + * Determine if external Base T PHY interrupt cause is high temperature + * failure alarm or link status change. + * + * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature + * failure alarm, else return PHY access status. + **/ +static s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc) +{ + u32 status; + u16 reg; + + *lsc = false; + + /* Vendor alarm triggered */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + &reg); + + if (status || !(reg & IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN)) + return status; + + /* Vendor Auto-Neg alarm triggered or Global alarm 1 triggered */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + &reg); + + if (status || !(reg & (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN | + IXGBE_MDIO_GLOBAL_ALARM_1_INT))) + return status; + + /* High temperature failure alarm triggered */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_ALARM_1, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + &reg); + + if (status) + return status; + + /* If high temperature failure, then return over temp error and exit */ + if (reg & IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL) { + /* power down the PHY in case the PHY FW didn't already */ + ixgbe_set_copper_phy_power(hw, false); + return IXGBE_ERR_OVERTEMP; + } + + /* Vendor alarm 2 triggered */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg); + + if (status || !(reg & IXGBE_MDIO_GLOBAL_STD_ALM2_INT)) + return status; + + /* link connect/disconnect event occurred */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg); + + if (status) + return status; + + /* Indicate LSC */ + if (reg & IXGBE_MDIO_AUTO_NEG_VEN_LSC) + *lsc = true; + + return 0; +} + +/** + * ixgbe_enable_lasi_ext_t_x550em - Enable external Base T PHY interrupts + * @hw: pointer to hardware structure + * + * Enable link status change and temperature failure alarm for the external + * Base T PHY + * + * Returns PHY access status + **/ +static s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw) +{ + u32
status; + u16 reg; + bool lsc; + + /* Clear interrupt flags */ + status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc); + + /* Enable link status change alarm */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &reg); + if (status) + return status; + + reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN; + + status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg); + if (status) + return status; + + /* Enables high temperature failure alarm */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + &reg); + if (status) + return status; + + reg |= IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN; + + status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + reg); + if (status) + return status; + + /* Enable vendor Auto-Neg alarm and Global Interrupt Mask 1 alarm */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + &reg); + if (status) + return status; + + reg |= (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN | + IXGBE_MDIO_GLOBAL_ALARM_1_INT); + + status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + reg); + if (status) + return status; + + /* Enable chip-wide vendor alarm */ + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + &reg); + if (status) + return status; + + reg |= IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN; + + status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + reg); + + return status; +} + +/** + * ixgbe_handle_lasi_ext_t_x550em - Handle external Base T PHY interrupt + * @hw: pointer to hardware structure + * + * Handle external Base T PHY interrupt. If high temperature + * failure alarm then return error, else if link status change + * then setup internal/external PHY link + * + * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature + * failure alarm, else return PHY access status. + **/ +static s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw) +{ + struct ixgbe_phy_info *phy = &hw->phy; + bool lsc; + u32 status; + + status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc); + if (status) + return status; + + if (lsc) + return phy->ops.setup_internal_link(hw); + + return 0; +} + +/** + * ixgbe_setup_kr_speed_x550em - Configure the KR PHY for link speed. + * @hw: pointer to hardware structure + * @speed: link speed + * + * Configures the integrated KR PHY. + **/ +static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw, + ixgbe_link_speed speed) +{ + s32 status; + u32 reg_val; + + status = ixgbe_read_iosf_sb_reg_x550(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); + if (status) + return status; + + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; + reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ | + IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC); + reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR | + IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX); + + /* Advertise 10G support. */ + if (speed & IXGBE_LINK_SPEED_10GB_FULL) + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR; + + /* Advertise 1G support. */ + if (speed & IXGBE_LINK_SPEED_1GB_FULL) + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX; + + /* Restart auto-negotiation.
*/ + reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART; + status = ixgbe_write_iosf_sb_reg_x550(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + + return status; +} + /** ixgbe_setup_kx4_x550em - Configure the KX4 PHY. * @hw: pointer to hardware structure * @@ -1018,85 +1333,82 @@ static s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw) **/ static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw) { - s32 status; - u32 reg_val; + return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised); +} - status = ixgbe_read_iosf_sb_reg_x550(hw, - IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); - if (status) - return status; +/** ixgbe_ext_phy_t_x550em_get_link - Get ext phy link status + * @hw: address of hardware structure + * @link_up: address of boolean to indicate link status + * + * Returns error code if unable to get link status. + **/ +static s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up) +{ + u32 ret; + u16 autoneg_status; - reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; - reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ; - reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC; - reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR | - IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX); + *link_up = false; - /* Advertise 10G support. */ - if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) - reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR; + /* read this twice back to back to indicate current status */ + ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_status); + if (ret) + return ret; - /* Advertise 1G support. */ - if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) - reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX; + ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_status); + if (ret) + return ret; - /* Restart auto-negotiation. */ - reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART; - status = ixgbe_write_iosf_sb_reg_x550(hw, - IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + *link_up = !!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS); - return status; + return 0; } -/** ixgbe_setup_internal_phy_x550em - Configure integrated KR PHY +/** ixgbe_setup_internal_phy_t_x550em - Configure KR PHY to X557 link * @hw: pointer to hardware structure * - * Configures the integrated KR PHY to talk to the external PHY. The base - * driver will call this function when it gets notification via interrupt from - * the external PHY. This function forces the internal PHY into iXFI mode at - * the correct speed. + * Configures the link between the integrated KR PHY and the external X557 PHY + * The driver will call this function when it gets a link status change + * interrupt from the X557 PHY. This function configures the link speed + * between the PHYs to match the link speed of the BASE-T link. + * - * A return of a non-zero value indicates an error, and the base driver should - * not report link up. + * A return of a non-zero value indicates an error, and the base driver should + * not report link up.
**/ -static s32 ixgbe_setup_internal_phy_x550em(struct ixgbe_hw *hw) +static s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw) { - s32 status; - u16 lasi, autoneg_status, speed; ixgbe_link_speed force_speed; + bool link_up; + u32 status; + u16 speed; - /* Verify that the external link status has changed */ - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_XENPAK_LASI_STATUS, - IXGBE_MDIO_PMA_PMD_DEV_TYPE, &lasi); + if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper) + return IXGBE_ERR_CONFIG; + + /* If link is not up, then there is no setup necessary so return */ + status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); if (status) return status; - /* If there was no change in link status, we can just exit */ - if (!(lasi & IXGBE_XENPAK_LASI_LINK_STATUS_ALARM)) + if (!link_up) return 0; - /* we read this twice back to back to indicate current status */ - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT, IXGBE_MDIO_AUTO_NEG_DEV_TYPE, - &autoneg_status); + &speed); if (status) return status; - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, - &autoneg_status); + /* If link is not still up, then no setup is necessary so return */ + status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); if (status) return status; - /* If link is not up return an error indicating treat link as down */ - if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS)) - return IXGBE_ERR_INVALID_LINK_SETTINGS; - - status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, - &speed); + if (!link_up) + return 0; /* clear everything but the speed and duplex bits */ speed &= IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK; @@ -1116,6 +1428,22 @@ static s32 ixgbe_setup_internal_phy_x550em(struct ixgbe_hw *hw) return ixgbe_setup_ixfi_x550em(hw, &force_speed); } +/** ixgbe_reset_phy_t_X550em - Performs X557 PHY reset and enables LASI + * @hw: pointer to hardware structure + **/ +static s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw) +{ + s32 status; + + status = ixgbe_reset_phy_generic(hw); + + if (status) + return status; + + /* Configure Link Status Alarm and Temperature Threshold interrupts */ + return ixgbe_enable_lasi_ext_t_x550em(hw); +} + /** ixgbe_init_phy_ops_X550em - PHY/SFP specific init * @hw: pointer to hardware structure * @@ -1126,25 +1454,32 @@ static s32 ixgbe_setup_internal_phy_x550em(struct ixgbe_hw *hw) static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) { struct ixgbe_phy_info *phy = &hw->phy; + ixgbe_link_speed speed; s32 ret_val; - u32 esdp; - if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) { - esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); - phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM; + hw->mac.ops.set_lan_id(hw); - if (hw->bus.lan_id) { - esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1); - esdp |= IXGBE_ESDP_SDP1_DIR; + if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) { + phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM; + ixgbe_setup_mux_ctl(hw); + + /* Save NW management interface connected on board. This is used + * to determine internal PHY mode. 
+ */ + phy->nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL); + + /* If internal PHY mode is KR, then initialize KR link */ + if (phy->nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE) { + speed = IXGBE_LINK_SPEED_10GB_FULL | + IXGBE_LINK_SPEED_1GB_FULL; + ret_val = ixgbe_setup_kr_speed_x550em(hw, speed); } - esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR); - IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); } /* Identify the PHY or SFP module */ ret_val = phy->ops.identify(hw); - /* Setup function pointers based on detected SFP module and speeds */ + /* Setup function pointers based on detected hardware */ ixgbe_init_mac_link_ops_X550em(hw); if (phy->sfp_type != ixgbe_sfp_type_unknown) phy->ops.reset = NULL; @@ -1162,11 +1497,30 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) phy->ops.write_reg = ixgbe_write_phy_reg_x550em; break; case ixgbe_phy_x550em_ext_t: - phy->ops.setup_internal_link = ixgbe_setup_internal_phy_x550em; + /* Save NW management interface connected on board. This is used + * to determine internal PHY mode + */ + phy->nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL); + + /* If internal link mode is XFI, then setup iXFI internal link, + * else setup KR now. + */ + if (!(phy->nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) { + phy->ops.setup_internal_link = + ixgbe_setup_internal_phy_t_x550em; + } else { + speed = IXGBE_LINK_SPEED_10GB_FULL | + IXGBE_LINK_SPEED_1GB_FULL; + ret_val = ixgbe_setup_kr_speed_x550em(hw, speed); + } + + phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em; + phy->ops.reset = ixgbe_reset_phy_t_X550em; break; default: break; } + return ret_val; } @@ -1207,65 +1561,35 @@ static s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw) { s32 status; u16 reg; - u32 retries = 2; - - do { - /* decrement retries counter and exit if we hit 0 */ - if (retries < 1) { - hw_dbg(hw, "External PHY not yet finished resetting."); - return IXGBE_ERR_PHY; - } - retries--; - - status = hw->phy.ops.read_reg(hw, - IXGBE_MDIO_TX_VENDOR_ALARMS_3, - IXGBE_MDIO_PMA_PMD_DEV_TYPE, - &reg); - if (status) - return status; - - /* Verify PHY FW reset has completed */ - } while ((reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) != 1); - - /* Set port to low power mode */ - status = hw->phy.ops.read_reg(hw, - IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL, - IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, - &reg); - if (status) - return status; - /* Enable the transmitter */ status = hw->phy.ops.read_reg(hw, - IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR, + IXGBE_MDIO_TX_VENDOR_ALARMS_3, IXGBE_MDIO_PMA_PMD_DEV_TYPE, &reg); if (status) return status; - reg &= ~IXGBE_MDIO_PMD_GLOBAL_TX_DISABLE; + /* If PHY FW reset completed bit is set then this is the first + * SW instance after a power on so the PHY FW must be un-stalled.
+ */ + if (reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) { + status = hw->phy.ops.read_reg(hw, + IXGBE_MDIO_GLOBAL_RES_PR_10, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + &reg); + if (status) + return status; - status = hw->phy.ops.write_reg(hw, - IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR, - IXGBE_MDIO_PMA_PMD_DEV_TYPE, - reg); - if (status) - return status; + reg &= ~IXGBE_MDIO_POWER_UP_STALL; - /* Un-stall the PHY FW */ - status = hw->phy.ops.read_reg(hw, - IXGBE_MDIO_GLOBAL_RES_PR_10, - IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, - &reg); - if (status) - return status; - - reg &= ~IXGBE_MDIO_POWER_UP_STALL; + status = hw->phy.ops.write_reg(hw, + IXGBE_MDIO_GLOBAL_RES_PR_10, + IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + reg); + if (status) + return status; + } - status = hw->phy.ops.write_reg(hw, - IXGBE_MDIO_GLOBAL_RES_PR_10, - IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, - reg); return status; } @@ -1282,6 +1606,7 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw) s32 status; u32 ctrl = 0; u32 i; + u32 hlreg0; bool link_up = false; /* Call adapter stop to disable Tx/Rx and clear interrupts */ @@ -1366,6 +1691,15 @@ mac_reset_top: hw->mac.num_rar_entries = 128; hw->mac.ops.init_rx_addrs(hw); + if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) { + hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); + hlreg0 &= ~IXGBE_HLREG0_MDCSPD; + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); + } + + if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) + ixgbe_setup_mux_ctl(hw); + return status; } @@ -1518,6 +1852,10 @@ static struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = { .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic, \ .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, \ .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, \ + .read_reg = &ixgbe_read_phy_reg_generic, \ + .write_reg = &ixgbe_write_phy_reg_generic, \ + .setup_link = &ixgbe_setup_phy_link_generic, \ + .set_phy_power = &ixgbe_set_copper_phy_power, \ .check_overtemp = &ixgbe_tn_check_overtemp, \ .get_firmware_version = &ixgbe_get_phy_firmware_version_generic, @@ -1525,9 +1863,6 @@ static struct ixgbe_phy_operations phy_ops_X550 = { X550_COMMON_PHY .init = NULL, .identify = &ixgbe_identify_phy_generic, - .read_reg = &ixgbe_read_phy_reg_generic, - .write_reg = &ixgbe_write_phy_reg_generic, - .setup_link = &ixgbe_setup_phy_link_generic, .read_i2c_combined = &ixgbe_read_i2c_combined_generic, .write_i2c_combined = &ixgbe_write_i2c_combined_generic, }; @@ -1536,9 +1871,14 @@ static struct ixgbe_phy_operations phy_ops_X550EM_x = { X550_COMMON_PHY .init = &ixgbe_init_phy_ops_X550em, .identify = &ixgbe_identify_phy_x550em, - .read_reg = NULL, /* defined later */ - .write_reg = NULL, /* defined later */ - .setup_link = NULL, /* defined later */ +}; + +static const u32 ixgbe_mvals_X550[IXGBE_MVALS_IDX_LIMIT] = { + IXGBE_MVALS_INIT(X550) +}; + +static const u32 ixgbe_mvals_X550EM_x[IXGBE_MVALS_IDX_LIMIT] = { + IXGBE_MVALS_INIT(X550EM_x) }; struct ixgbe_info ixgbe_X550_info = { @@ -1548,6 +1888,7 @@ struct ixgbe_info ixgbe_X550_info = { .eeprom_ops = &eeprom_ops_X550, .phy_ops = &phy_ops_X550, .mbx_ops = &mbx_ops_generic, + .mvals = ixgbe_mvals_X550, }; struct ixgbe_info ixgbe_X550EM_x_info = { @@ -1557,4 +1898,5 @@ struct ixgbe_info ixgbe_X550EM_x_info = { .eeprom_ops = &eeprom_ops_X550EM_x, .phy_ops = &phy_ops_X550EM_x, .mbx_ops = &mbx_ops_generic, + .mvals = ixgbe_mvals_X550EM_x, }; diff --git a/include/net/af_unix.h b/include/net/af_unix.h index a175ba4a7adb..4a167b30a12f 100644 --- a/include/net/af_unix.h +++ b/include/net/af_unix.h @@ -39,7 +39,6 @@ struct
unix_skb_parms { }; #define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb)) -#define UNIXSID(skb) (&UNIXCB((skb)).secid) #define unix_state_lock(s) spin_lock(&unix_sk(s)->lock) #define unix_state_unlock(s) spin_unlock(&unix_sk(s)->lock) diff --git a/include/uapi/linux/can/gw.h b/include/uapi/linux/can/gw.h index 3e6184cf2f6d..5079b9d57e31 100644 --- a/include/uapi/linux/can/gw.h +++ b/include/uapi/linux/can/gw.h @@ -78,6 +78,7 @@ enum { CGW_FILTER, /* specify struct can_filter on source CAN device */ CGW_DELETED, /* number of deleted CAN frames (see max_hops param) */ CGW_LIM_HOPS, /* limit the number of hops of this specific rule */ + CGW_MOD_UID, /* user defined identifier for modification updates */ __CGW_MAX }; @@ -162,6 +163,10 @@ enum { * load time of the can-gw module). This value is used to reduce the number of * possible hops for this gateway rule to a value smaller than max_hops. * + * CGW_MOD_UID (length 4 bytes): + * Optional non-zero user defined routing job identifier to alter existing + * modification settings at runtime. + * * CGW_CS_XOR (length 4 bytes): * Set a simple XOR checksum starting with an initial value into * data[result-idx] using data[start-idx] .. data[end-idx] diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index cecb482ed919..13949a71591d 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -873,13 +873,15 @@ out: return err; } -static int fdb_delete_by_addr(struct net_bridge *br, const u8 *addr, u16 vlan) +static int fdb_delete_by_addr_and_port(struct net_bridge_port *p, + const u8 *addr, u16 vlan) { + struct net_bridge *br = p->br; struct hlist_head *head = &br->hash[br_mac_hash(addr, vlan)]; struct net_bridge_fdb_entry *fdb; fdb = fdb_find(head, addr, vlan); - if (!fdb) + if (!fdb || fdb->dst != p) return -ENOENT; fdb_delete(br, fdb); @@ -892,7 +894,7 @@ static int __br_fdb_delete(struct net_bridge_port *p, int err; spin_lock_bh(&p->br->hash_lock); - err = fdb_delete_by_addr(p->br, addr, vid); + err = fdb_delete_by_addr_and_port(p, addr, vid); spin_unlock_bh(&p->br->hash_lock); return err; diff --git a/net/can/gw.c b/net/can/gw.c index a6f448e18ea8..455168718c2e 100644 --- a/net/can/gw.c +++ b/net/can/gw.c @@ -110,6 +110,7 @@ struct cf_mod { void (*xor)(struct can_frame *cf, struct cgw_csum_xor *xor); void (*crc8)(struct can_frame *cf, struct cgw_csum_crc8 *crc8); } csumfunc; + u32 uid; }; @@ -548,6 +549,11 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type, goto cancel; } + if (gwj->mod.uid) { + if (nla_put_u32(skb, CGW_MOD_UID, gwj->mod.uid) < 0) + goto cancel; + } + if (gwj->mod.csumfunc.crc8) { if (nla_put(skb, CGW_CS_CRC8, CGW_CS_CRC8_LEN, &gwj->mod.csum.crc8) < 0) @@ -619,6 +625,7 @@ static const struct nla_policy cgw_policy[CGW_MAX+1] = { [CGW_DST_IF] = { .type = NLA_U32 }, [CGW_FILTER] = { .len = sizeof(struct can_filter) }, [CGW_LIM_HOPS] = { .type = NLA_U8 }, + [CGW_MOD_UID] = { .type = NLA_U32 }, }; /* check for common and gwtype specific attributes */ @@ -761,6 +768,10 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod, else mod->csumfunc.xor = cgw_csum_xor_neg; } + + if (tb[CGW_MOD_UID]) { + nla_memcpy(&mod->uid, tb[CGW_MOD_UID], sizeof(u32)); + } } if (gwtype == CGW_TYPE_CAN_CAN) { @@ -802,6 +813,8 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh) { struct rtcanmsg *r; struct cgw_job *gwj; + struct cf_mod mod; + struct can_can_gw ccgw; u8 limhops = 0; int err = 0; @@ -819,6 +832,36 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh) if
(r->gwtype != CGW_TYPE_CAN_CAN) return -EINVAL; + err = cgw_parse_attr(nlh, &mod, CGW_TYPE_CAN_CAN, &ccgw, &limhops); + if (err < 0) + return err; + + if (mod.uid) { + + ASSERT_RTNL(); + + /* check for updating an existing job with identical uid */ + hlist_for_each_entry(gwj, &cgw_list, list) { + + if (gwj->mod.uid != mod.uid) + continue; + + /* interfaces & filters must be identical */ + if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw))) + return -EINVAL; + + /* update modifications with disabled softirq & quit */ + local_bh_disable(); + memcpy(&gwj->mod, &mod, sizeof(mod)); + local_bh_enable(); + return 0; + } + } + + /* ifindex == 0 is not allowed for job creation */ + if (!ccgw.src_idx || !ccgw.dst_idx) + return -ENODEV; + gwj = kmem_cache_alloc(cgw_cache, GFP_KERNEL); if (!gwj) return -ENOMEM; @@ -828,18 +871,14 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh) gwj->deleted_frames = 0; gwj->flags = r->flags; gwj->gwtype = r->gwtype; + gwj->limit_hops = limhops; - err = cgw_parse_attr(nlh, &gwj->mod, CGW_TYPE_CAN_CAN, &gwj->ccgw, - &limhops); - if (err < 0) - goto out; + /* insert already parsed information */ + memcpy(&gwj->mod, &mod, sizeof(mod)); + memcpy(&gwj->ccgw, &ccgw, sizeof(ccgw)); err = -ENODEV; - /* ifindex == 0 is not allowed for job creation */ - if (!gwj->ccgw.src_idx || !gwj->ccgw.dst_idx) - goto out; - gwj->src.dev = __dev_get_by_index(&init_net, gwj->ccgw.src_idx); if (!gwj->src.dev) @@ -856,8 +895,6 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh) if (gwj->dst.dev->type != ARPHRD_CAN) goto out; - gwj->limit_hops = limhops; - ASSERT_RTNL(); err = cgw_register_filter(gwj); @@ -931,8 +968,15 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh) if (gwj->limit_hops != limhops) continue; - if (memcmp(&gwj->mod, &mod, sizeof(mod))) - continue; + /* we have a match when uid is enabled and identical */ + if (gwj->mod.uid || mod.uid) { + if (gwj->mod.uid != mod.uid) + continue; + } else { + /* no uid => check for identical modifications */ + if (memcmp(&gwj->mod, &mod, sizeof(mod))) + continue; + } /* if (r->gwtype == CGW_TYPE_CAN_CAN) - is made sure here */ if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw))) diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index f25e1675b865..03ee4d359f6a 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -140,12 +140,17 @@ static struct hlist_head *unix_sockets_unbound(void *addr) #ifdef CONFIG_SECURITY_NETWORK static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb) { - memcpy(UNIXSID(skb), &scm->secid, sizeof(u32)); + UNIXCB(skb).secid = scm->secid; } static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb) { - scm->secid = *UNIXSID(skb); + scm->secid = UNIXCB(skb).secid; +} + +static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb) +{ + return (scm->secid == UNIXCB(skb).secid); } #else static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb) @@ -153,6 +158,11 @@ static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb) static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb) { } + +static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb) +{ + return true; +} #endif /* CONFIG_SECURITY_NETWORK */ /* @@ -1414,6 +1424,7 @@ static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool sen UNIXCB(skb).uid = scm->creds.uid; UNIXCB(skb).gid = scm->creds.gid; UNIXCB(skb).fp = NULL; + unix_get_secdata(scm, skb); if 
(scm->fp && send_fds) err = unix_attach_fds(scm, skb); @@ -1509,7 +1520,6 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg, if (err < 0) goto out_free; max_level = err + 1; - unix_get_secdata(&scm, skb); skb_put(skb, len - data_len); skb->data_len = data_len; @@ -2118,11 +2128,13 @@ unlock: /* Never glue messages from different writers */ if ((UNIXCB(skb).pid != scm.pid) || !uid_eq(UNIXCB(skb).uid, scm.creds.uid) || - !gid_eq(UNIXCB(skb).gid, scm.creds.gid)) + !gid_eq(UNIXCB(skb).gid, scm.creds.gid) || + !unix_secdata_eq(&scm, skb)) break; } else if (test_bit(SOCK_PASSCRED, &sock->flags)) { /* Copy credentials */ scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid); + unix_set_secdata(&scm, skb); check_creds = true; } |
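Two usage notes on the non-ixgbe hunks above. First, CGW_MOD_UID: once a routing job carries a non-zero uid, a later RTM_NEWROUTE naming the same interface pair and uid no longer allocates a duplicate job; cgw_create_job() copies the freshly parsed cf_mod over the live rule under local_bh_disable(), so frame modifications can be retuned without dropping traffic. A sketch of the request a userspace tool might assemble (raw rtnetlink over a NETLINK_ROUTE socket; the add_rta() helper and the "can0"/"can1"/uid values are illustrative, and the replacement CGW_MOD_*/CGW_CS_* payloads would be appended the same way):

	#include <linux/can/gw.h>
	#include <linux/rtnetlink.h>
	#include <net/if.h>
	#include <string.h>
	#include <sys/socket.h>

	/* append one rtattr to a netlink request (hand-rolled helper) */
	static void add_rta(struct nlmsghdr *nh, int type, const void *data, int len)
	{
		struct rtattr *rta = (struct rtattr *)((char *)nh + NLMSG_ALIGN(nh->nlmsg_len));

		rta->rta_type = type;
		rta->rta_len = RTA_LENGTH(len);
		memcpy(RTA_DATA(rta), data, len);
		nh->nlmsg_len = NLMSG_ALIGN(nh->nlmsg_len) + RTA_ALIGN(rta->rta_len);
	}

	static void cgw_update_job_by_uid(int rtnl_fd)
	{
		struct {
			struct nlmsghdr nh;
			struct rtcanmsg rtcan;
			char buf[128];		/* room for the rtattrs */
		} req = {
			.nh.nlmsg_len     = NLMSG_LENGTH(sizeof(struct rtcanmsg)),
			.nh.nlmsg_type    = RTM_NEWROUTE,
			.nh.nlmsg_flags   = NLM_F_REQUEST | NLM_F_ACK,
			.rtcan.can_family = AF_CAN,
			.rtcan.gwtype     = CGW_TYPE_CAN_CAN,
		};
		__u32 src = if_nametoindex("can0");	/* must match the job */
		__u32 dst = if_nametoindex("can1");	/* must match the job */
		__u32 uid = 42;				/* selects the job */

		add_rta(&req.nh, CGW_SRC_IF, &src, sizeof(src));
		add_rta(&req.nh, CGW_DST_IF, &dst, sizeof(dst));
		add_rta(&req.nh, CGW_MOD_UID, &uid, sizeof(uid));
		/* ...append the new CGW_MOD_* attributes here, then: */
		send(rtnl_fd, &req, req.nh.nlmsg_len, 0);
	}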
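Second, the af_unix hunks: the sender's security ID now travels in every queued skb's control block (set in unix_scm_to_skb() above rather than only in the datagram send path), unix_stream_recvmsg() refuses to coalesce consecutive skbs whose secid differs (the new unix_secdata_eq() check), and the label is copied back out for stream readers just as it long has been for datagrams. Userspace opts in with SO_PASSSEC and reads SCM_SECURITY from the ancillary data; a short sketch using only the standard socket API (the fallback define is for userspace headers that do not re-export the <linux/socket.h> value):

	#include <stdio.h>
	#include <sys/socket.h>
	#include <sys/uio.h>

	#ifndef SCM_SECURITY
	#define SCM_SECURITY 0x03		/* value from <linux/socket.h> */
	#endif

	static void show_peer_context(int fd)
	{
		int on = 1;
		char data[256], cbuf[CMSG_SPACE(256)];
		struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
		struct msghdr msg = {
			.msg_iov = &iov, .msg_iovlen = 1,
			.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
		};
		struct cmsghdr *c;

		setsockopt(fd, SOL_SOCKET, SO_PASSSEC, &on, sizeof(on));

		if (recvmsg(fd, &msg, 0) <= 0)
			return;

		for (c = CMSG_FIRSTHDR(&msg); c; c = CMSG_NXTHDR(&msg, c))
			if (c->cmsg_level == SOL_SOCKET && c->cmsg_type == SCM_SECURITY)
				/* LSM context string; not guaranteed NUL-terminated */
				printf("peer context: %.*s\n",
				       (int)(c->cmsg_len - CMSG_LEN(0)),
				       (char *)CMSG_DATA(c));
	}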