36 files changed, 857 insertions, 1119 deletions
diff --git a/Documentation/networking/timestamping.txt b/Documentation/networking/timestamping.txt index a977339fbe0a..671cccf0dcd2 100644 --- a/Documentation/networking/timestamping.txt +++ b/Documentation/networking/timestamping.txt @@ -44,11 +44,17 @@ timeval of SO_TIMESTAMP (ms). Supports multiple types of timestamp requests. As a result, this socket option takes a bitmap of flags, not a boolean. In - err = setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, (void *) val, &val); + err = setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, (void *) val, + sizeof(val)); val is an integer with any of the following bits set. Setting other bit returns EINVAL and does not change the current state. +The socket option configures timestamp generation for individual +sk_buffs (1.3.1), timestamp reporting to the socket's error +queue (1.3.2) and options (1.3.3). Timestamp generation can also +be enabled for individual sendmsg calls using cmsg (1.3.4). + 1.3.1 Timestamp Generation @@ -71,13 +77,16 @@ SOF_TIMESTAMPING_RX_SOFTWARE: kernel receive stack. SOF_TIMESTAMPING_TX_HARDWARE: - Request tx timestamps generated by the network adapter. + Request tx timestamps generated by the network adapter. This flag + can be enabled via both socket options and control messages. SOF_TIMESTAMPING_TX_SOFTWARE: Request tx timestamps when data leaves the kernel. These timestamps are generated in the device driver as close as possible, but always prior to, passing the packet to the network interface. Hence, they require driver support and may not be available for all devices. + This flag can be enabled via both socket options and control messages. + SOF_TIMESTAMPING_TX_SCHED: Request tx timestamps prior to entering the packet scheduler. Kernel @@ -90,7 +99,8 @@ SOF_TIMESTAMPING_TX_SCHED: machines with virtual devices where a transmitted packet travels through multiple devices and, hence, multiple packet schedulers, a timestamp is generated at each layer. This allows for fine - grained measurement of queuing delay. + grained measurement of queuing delay. This flag can be enabled + via both socket options and control messages. SOF_TIMESTAMPING_TX_ACK: Request tx timestamps when all data in the send buffer has been @@ -99,6 +109,7 @@ SOF_TIMESTAMPING_TX_ACK: over-report measurement, because the timestamp is generated when all data up to and including the buffer at send() was acknowledged: the cumulative acknowledgment. The mechanism ignores SACK and FACK. + This flag can be enabled via both socket options and control messages. 1.3.2 Timestamp Reporting @@ -183,6 +194,37 @@ having access to the contents of the original packet, so cannot be combined with SOF_TIMESTAMPING_OPT_TSONLY. +1.3.4. Enabling timestamps via control messages + +In addition to socket options, timestamp generation can be requested +per write via cmsg, only for SOF_TIMESTAMPING_TX_* (see Section 1.3.1). +Using this feature, applications can sample timestamps per sendmsg() +without paying the overhead of enabling and disabling timestamps via +setsockopt: + + struct msghdr *msg; + ... + cmsg = CMSG_FIRSTHDR(msg); + cmsg->cmsg_level = SOL_SOCKET; + cmsg->cmsg_type = SO_TIMESTAMPING; + cmsg->cmsg_len = CMSG_LEN(sizeof(__u32)); + *((__u32 *) CMSG_DATA(cmsg)) = SOF_TIMESTAMPING_TX_SCHED | + SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_TX_ACK; + err = sendmsg(fd, msg, 0); + +The SOF_TIMESTAMPING_TX_* flags set via cmsg will override +the SOF_TIMESTAMPING_TX_* flags set via setsockopt. 
+ +Moreover, applications must still enable timestamp reporting via +setsockopt to receive timestamps: + + __u32 val = SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_OPT_ID /* or any other flag */; + err = setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, (void *) val, + sizeof(val)); + + 1.4 Bytestream Timestamps The SO_TIMESTAMPING interface supports timestamping of bytes in a diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c index a92ca651c399..24070287c2bc 100644 --- a/drivers/net/dsa/mv88e6131.c +++ b/drivers/net/dsa/mv88e6131.c @@ -169,6 +169,17 @@ struct dsa_switch_driver mv88e6131_switch_driver = { .get_ethtool_stats = mv88e6xxx_get_ethtool_stats, .get_sset_count = mv88e6xxx_get_sset_count, .adjust_link = mv88e6xxx_adjust_link, + .port_bridge_join = mv88e6xxx_port_bridge_join, + .port_bridge_leave = mv88e6xxx_port_bridge_leave, + .port_vlan_filtering = mv88e6xxx_port_vlan_filtering, + .port_vlan_prepare = mv88e6xxx_port_vlan_prepare, + .port_vlan_add = mv88e6xxx_port_vlan_add, + .port_vlan_del = mv88e6xxx_port_vlan_del, + .port_vlan_dump = mv88e6xxx_port_vlan_dump, + .port_fdb_prepare = mv88e6xxx_port_fdb_prepare, + .port_fdb_add = mv88e6xxx_port_fdb_add, + .port_fdb_del = mv88e6xxx_port_fdb_del, + .port_fdb_dump = mv88e6xxx_port_fdb_dump, }; MODULE_ALIAS("platform:mv88e6085"); diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index 50454be86570..0dda2817d0ec 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c @@ -482,6 +482,50 @@ static bool mv88e6xxx_6352_family(struct dsa_switch *ds) return false; } +static unsigned int mv88e6xxx_num_databases(struct dsa_switch *ds) +{ + struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); + + /* The following devices have 4-bit identifiers for 16 databases */ + if (ps->id == PORT_SWITCH_ID_6061) + return 16; + + /* The following devices have 6-bit identifiers for 64 databases */ + if (ps->id == PORT_SWITCH_ID_6065) + return 64; + + /* The following devices have 8-bit identifiers for 256 databases */ + if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds)) + return 256; + + /* The following devices have 12-bit identifiers for 4096 databases */ + if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) || + mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) + return 4096; + + return 0; +} + +static bool mv88e6xxx_has_fid_reg(struct dsa_switch *ds) +{ + /* Does the device have dedicated FID registers for ATU and VTU ops? */ + if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) || + mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) + return true; + + return false; +} + +static bool mv88e6xxx_has_stu(struct dsa_switch *ds) +{ + /* Does the device have STU and dedicated SID registers for VTU ops? */ + if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) || + mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) + return true; + + return false; +} + /* We expect the switch to perform auto negotiation if there is a real * phy. However, in the case of a fixed link phy, we force the port * settings from the fixed link settings. 
@@ -951,10 +995,30 @@ out: return ret; } -static int _mv88e6xxx_atu_cmd(struct dsa_switch *ds, u16 cmd) +static int _mv88e6xxx_atu_cmd(struct dsa_switch *ds, u16 fid, u16 cmd) { int ret; + if (mv88e6xxx_has_fid_reg(ds)) { + ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, fid); + if (ret < 0) + return ret; + } else if (mv88e6xxx_num_databases(ds) == 256) { + /* ATU DBNum[7:4] are located in ATU Control 15:12 */ + ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_CONTROL); + if (ret < 0) + return ret; + + ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_CONTROL, + (ret & 0xfff) | + ((fid << 8) & 0xf000)); + if (ret < 0) + return ret; + + /* ATU DBNum[3:0] are located in ATU Operation 3:0 */ + cmd |= fid & 0xf; + } + ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_OP, cmd); if (ret < 0) return ret; @@ -1001,11 +1065,6 @@ static int _mv88e6xxx_atu_flush_move(struct dsa_switch *ds, return err; if (entry->fid) { - err = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, - entry->fid); - if (err) - return err; - op = static_too ? GLOBAL_ATU_OP_FLUSH_MOVE_ALL_DB : GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC_DB; } else { @@ -1013,7 +1072,7 @@ static int _mv88e6xxx_atu_flush_move(struct dsa_switch *ds, GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC; } - return _mv88e6xxx_atu_cmd(ds, op); + return _mv88e6xxx_atu_cmd(ds, entry->fid, op); } static int _mv88e6xxx_atu_flush(struct dsa_switch *ds, u16 fid, bool static_too) @@ -1321,15 +1380,27 @@ static int _mv88e6xxx_vtu_getnext(struct dsa_switch *ds, if (ret < 0) return ret; - if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) || - mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) { + if (mv88e6xxx_has_fid_reg(ds)) { ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_FID); if (ret < 0) return ret; next.fid = ret & GLOBAL_VTU_FID_MASK; + } else if (mv88e6xxx_num_databases(ds) == 256) { + /* VTU DBNum[7:4] are located in VTU Operation 11:8, and + * VTU DBNum[3:0] are located in VTU Operation 3:0 + */ + ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, + GLOBAL_VTU_OP); + if (ret < 0) + return ret; + + next.fid = (ret & 0xf00) >> 4; + next.fid |= ret & 0xf; + } + if (mv88e6xxx_has_stu(ds)) { ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_SID); if (ret < 0) @@ -1397,6 +1468,7 @@ unlock: static int _mv88e6xxx_vtu_loadpurge(struct dsa_switch *ds, struct mv88e6xxx_vtu_stu_entry *entry) { + u16 op = GLOBAL_VTU_OP_VTU_LOAD_PURGE; u16 reg = 0; int ret; @@ -1412,17 +1484,24 @@ static int _mv88e6xxx_vtu_loadpurge(struct dsa_switch *ds, if (ret < 0) return ret; - if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) || - mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) { + if (mv88e6xxx_has_stu(ds)) { reg = entry->sid & GLOBAL_VTU_SID_MASK; ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, reg); if (ret < 0) return ret; + } + if (mv88e6xxx_has_fid_reg(ds)) { reg = entry->fid & GLOBAL_VTU_FID_MASK; ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_FID, reg); if (ret < 0) return ret; + } else if (mv88e6xxx_num_databases(ds) == 256) { + /* VTU DBNum[7:4] are located in VTU Operation 11:8, and + * VTU DBNum[3:0] are located in VTU Operation 3:0 + */ + op |= (entry->fid & 0xf0) << 8; + op |= entry->fid & 0xf; } reg = GLOBAL_VTU_VID_VALID; @@ -1432,7 +1511,7 @@ loadpurge: if (ret < 0) return ret; - return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_VTU_LOAD_PURGE); + return _mv88e6xxx_vtu_cmd(ds, op); } static int _mv88e6xxx_stu_getnext(struct dsa_switch *ds, u8 sid, @@ -1511,9 +1590,17 @@ loadpurge: static int 
_mv88e6xxx_port_fid(struct dsa_switch *ds, int port, u16 *new, u16 *old) { + u16 upper_mask; u16 fid; int ret; + if (mv88e6xxx_num_databases(ds) == 4096) + upper_mask = 0xff; + else if (mv88e6xxx_num_databases(ds) == 256) + upper_mask = 0xf; + else + return -EOPNOTSUPP; + /* Port's default FID bits 3:0 are located in reg 0x06, offset 12 */ ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_BASE_VLAN); if (ret < 0) @@ -1536,11 +1623,11 @@ static int _mv88e6xxx_port_fid(struct dsa_switch *ds, int port, u16 *new, if (ret < 0) return ret; - fid |= (ret & PORT_CONTROL_1_FID_11_4_MASK) << 4; + fid |= (ret & upper_mask) << 4; if (new) { - ret &= ~PORT_CONTROL_1_FID_11_4_MASK; - ret |= (*new >> 4) & PORT_CONTROL_1_FID_11_4_MASK; + ret &= ~upper_mask; + ret |= (*new >> 4) & upper_mask; ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL_1, ret); @@ -1604,7 +1691,7 @@ static int _mv88e6xxx_fid_new(struct dsa_switch *ds, u16 *fid) * databases are not needed. Return the next positive available. */ *fid = find_next_zero_bit(fid_bitmap, MV88E6XXX_N_FID, 1); - if (unlikely(*fid == MV88E6XXX_N_FID)) + if (unlikely(*fid >= mv88e6xxx_num_databases(ds))) return -ENOSPC; /* Clear the database */ @@ -1965,11 +2052,7 @@ static int _mv88e6xxx_atu_load(struct dsa_switch *ds, if (ret < 0) return ret; - ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, entry->fid); - if (ret < 0) - return ret; - - return _mv88e6xxx_atu_cmd(ds, GLOBAL_ATU_OP_LOAD_DB); + return _mv88e6xxx_atu_cmd(ds, entry->fid, GLOBAL_ATU_OP_LOAD_DB); } static int _mv88e6xxx_port_fdb_load(struct dsa_switch *ds, int port, @@ -2052,11 +2135,7 @@ static int _mv88e6xxx_atu_getnext(struct dsa_switch *ds, u16 fid, if (ret < 0) return ret; - ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, fid); - if (ret < 0) - return ret; - - ret = _mv88e6xxx_atu_cmd(ds, GLOBAL_ATU_OP_GET_NEXT_DB); + ret = _mv88e6xxx_atu_cmd(ds, fid, GLOBAL_ATU_OP_GET_NEXT_DB); if (ret < 0) return ret; @@ -2444,7 +2523,8 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) reg = 0; if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) || - mv88e6xxx_6095_family(ds) || mv88e6xxx_6320_family(ds)) + mv88e6xxx_6095_family(ds) || mv88e6xxx_6320_family(ds) || + mv88e6xxx_6185_family(ds)) reg = PORT_CONTROL_2_MAP_DA; if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) || diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index 48a7d7dee846..eec3200ade4a 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -61,8 +61,7 @@ #define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0) #define MACB_WOL_ENABLED (0x1 << 1) -/* - * Graceful stop timeouts in us. We should allow up to +/* Graceful stop timeouts in us. We should allow up to * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions) */ #define MACB_HALT_TIMEOUT 1230 @@ -130,9 +129,8 @@ static void hw_writel(struct macb *bp, int offset, u32 value) writel_relaxed(value, bp->regs + offset); } -/* - * Find the CPU endianness by using the loopback bit of NCR register. When the - * CPU is in big endian we need to program swaped mode for management +/* Find the CPU endianness by using the loopback bit of NCR register. When the + * CPU is in big endian we need to program swapped mode for management * descriptor access. 
*/ static bool hw_is_native_io(void __iomem *addr) @@ -189,7 +187,7 @@ static void macb_get_hwaddr(struct macb *bp) pdata = dev_get_platdata(&bp->pdev->dev); - /* Check all 4 address register for vaild address */ + /* Check all 4 address register for valid address */ for (i = 0; i < 4; i++) { bottom = macb_or_gem_readl(bp, SA1B + i * 8); top = macb_or_gem_readl(bp, SA1T + i * 8); @@ -297,7 +295,7 @@ static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev) ferr = DIV_ROUND_UP(ferr, rate / 100000); if (ferr > 5) netdev_warn(dev, "unable to generate target frequency: %ld Hz\n", - rate); + rate); if (clk_set_rate(clk, rate_rounded)) netdev_err(dev, "adjusting tx_clk failed.\n"); @@ -386,7 +384,8 @@ static int macb_mii_probe(struct net_device *dev) pdata = dev_get_platdata(&bp->pdev->dev); if (pdata && gpio_is_valid(pdata->phy_irq_pin)) { - ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin, "phy int"); + ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin, + "phy int"); if (!ret) { phy_irq = gpio_to_irq(pdata->phy_irq_pin); phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq; @@ -430,7 +429,7 @@ static int macb_mii_init(struct macb *bp) macb_writel(bp, NCR, MACB_BIT(MPE)); bp->mii_bus = mdiobus_alloc(); - if (bp->mii_bus == NULL) { + if (!bp->mii_bus) { err = -ENOMEM; goto err_out; } @@ -439,7 +438,7 @@ static int macb_mii_init(struct macb *bp) bp->mii_bus->read = &macb_mdio_read; bp->mii_bus->write = &macb_mdio_write; snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", - bp->pdev->name, bp->pdev->id); + bp->pdev->name, bp->pdev->id); bp->mii_bus->priv = bp; bp->mii_bus->parent = &bp->dev->dev; pdata = dev_get_platdata(&bp->pdev->dev); @@ -452,7 +451,8 @@ static int macb_mii_init(struct macb *bp) err = of_mdiobus_register(bp->mii_bus, np); /* fallback to standard phy registration if no phy were - found during dt phy registration */ + * found during dt phy registration + */ if (!err && !phy_find_first(bp->mii_bus)) { for (i = 0; i < PHY_MAX_ADDR; i++) { struct phy_device *phydev; @@ -499,7 +499,7 @@ static void macb_update_stats(struct macb *bp) WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4); - for(; p < end; p++, offset += 4) + for (; p < end; p++, offset += 4) *p += bp->macb_reg_readl(bp, offset); } @@ -567,8 +567,7 @@ static void macb_tx_error_task(struct work_struct *work) /* Make sure nobody is trying to queue up new packets */ netif_tx_stop_all_queues(bp->dev); - /* - * Stop transmission now + /* Stop transmission now * (in case we have just queued new packets) * macb/gem must be halted to write TBQP register */ @@ -576,8 +575,7 @@ static void macb_tx_error_task(struct work_struct *work) /* Just complain for now, reinitializing TX path can be good */ netdev_err(bp->dev, "BUG: halt tx timed out\n"); - /* - * Treat frames in TX queue including the ones that caused the error. + /* Treat frames in TX queue including the ones that caused the error. * Free transmit buffers in upper layer. */ for (tail = queue->tx_tail; tail != queue->tx_head; tail++) { @@ -607,10 +605,9 @@ static void macb_tx_error_task(struct work_struct *work) bp->stats.tx_bytes += skb->len; } } else { - /* - * "Buffers exhausted mid-frame" errors may only happen - * if the driver is buggy, so complain loudly about those. - * Statistics are updated by hardware. + /* "Buffers exhausted mid-frame" errors may only happen + * if the driver is buggy, so complain loudly about + * those. Statistics are updated by hardware. 
*/ if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED)) netdev_err(bp->dev, @@ -662,7 +659,7 @@ static void macb_tx_interrupt(struct macb_queue *queue) queue_writel(queue, ISR, MACB_BIT(TCOMP)); netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n", - (unsigned long)status); + (unsigned long)status); head = queue->tx_head; for (tail = queue->tx_tail; tail != head; tail++) { @@ -722,7 +719,8 @@ static void gem_rx_refill(struct macb *bp) struct sk_buff *skb; dma_addr_t paddr; - while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) { + while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, + RX_RING_SIZE) > 0) { entry = macb_rx_ring_wrap(bp->rx_prepared_head); /* Make hw descriptor updates visible to CPU */ @@ -730,10 +728,10 @@ static void gem_rx_refill(struct macb *bp) bp->rx_prepared_head++; - if (bp->rx_skbuff[entry] == NULL) { + if (!bp->rx_skbuff[entry]) { /* allocate sk_buff for this free entry in ring */ skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size); - if (unlikely(skb == NULL)) { + if (unlikely(!skb)) { netdev_err(bp->dev, "Unable to allocate sk_buff\n"); break; @@ -741,7 +739,8 @@ static void gem_rx_refill(struct macb *bp) /* now fill corresponding descriptor entry */ paddr = dma_map_single(&bp->pdev->dev, skb->data, - bp->rx_buffer_size, DMA_FROM_DEVICE); + bp->rx_buffer_size, + DMA_FROM_DEVICE); if (dma_mapping_error(&bp->pdev->dev, paddr)) { dev_kfree_skb(skb); break; @@ -766,7 +765,7 @@ static void gem_rx_refill(struct macb *bp) wmb(); netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n", - bp->rx_prepared_head, bp->rx_tail); + bp->rx_prepared_head, bp->rx_tail); } /* Mark DMA descriptors from begin up to and not including end as unused */ @@ -777,14 +776,14 @@ static void discard_partial_frame(struct macb *bp, unsigned int begin, for (frag = begin; frag != end; frag++) { struct macb_dma_desc *desc = macb_rx_desc(bp, frag); + desc->addr &= ~MACB_BIT(RX_USED); } /* Make descriptor updates visible to hardware */ wmb(); - /* - * When this happens, the hardware stats registers for + /* When this happens, the hardware stats registers for * whatever caused this is updated, so we don't have to record * anything. */ @@ -880,11 +879,10 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag, len = desc->ctrl & bp->rx_frm_len_mask; netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n", - macb_rx_ring_wrap(first_frag), - macb_rx_ring_wrap(last_frag), len); + macb_rx_ring_wrap(first_frag), + macb_rx_ring_wrap(last_frag), len); - /* - * The ethernet header starts NET_IP_ALIGN bytes into the + /* The ethernet header starts NET_IP_ALIGN bytes into the * first buffer. Since the header is 14 bytes, this makes the * payload word-aligned. 
* @@ -924,7 +922,8 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag, frag_len = len - offset; } skb_copy_to_linear_data_offset(skb, offset, - macb_rx_buffer(bp, frag), frag_len); + macb_rx_buffer(bp, frag), + frag_len); offset += bp->rx_buffer_size; desc = macb_rx_desc(bp, frag); desc->addr &= ~MACB_BIT(RX_USED); @@ -942,7 +941,7 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag, bp->stats.rx_packets++; bp->stats.rx_bytes += skb->len; netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n", - skb->len, skb->csum); + skb->len, skb->csum); netif_receive_skb(skb); return 0; @@ -1049,7 +1048,7 @@ static int macb_poll(struct napi_struct *napi, int budget) work_done = 0; netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n", - (unsigned long)status, budget); + (unsigned long)status, budget); work_done = bp->macbgem_ops.mog_rx(bp, budget); if (work_done < budget) { @@ -1099,8 +1098,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) (unsigned long)status); if (status & MACB_RX_INT_FLAGS) { - /* - * There's no point taking any more interrupts + /* There's no point taking any more interrupts * until we have processed the buffers. The * scheduling call may fail if the poll routine * is already scheduled, so disable interrupts @@ -1129,8 +1127,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) if (status & MACB_BIT(TCOMP)) macb_tx_interrupt(queue); - /* - * Link change detection isn't possible with RMII, so we'll + /* Link change detection isn't possible with RMII, so we'll * add that if/when we get our hands on a full-blown MII PHY. */ @@ -1161,8 +1158,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) } if (status & MACB_BIT(HRESP)) { - /* - * TODO: Reset the hardware, and maybe move the + /* TODO: Reset the hardware, and maybe move the * netdev_err to a lower-priority context as well * (work queue?) */ @@ -1181,8 +1177,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) } #ifdef CONFIG_NET_POLL_CONTROLLER -/* - * Polling receive - used by netconsole and other diagnostic tools +/* Polling receive - used by netconsole and other diagnostic tools * to allow network i/o with interrupts disabled. */ static void macb_poll_controller(struct net_device *dev) @@ -1268,7 +1263,7 @@ static unsigned int macb_tx_map(struct macb *bp, } /* Should never happen */ - if (unlikely(tx_skb == NULL)) { + if (unlikely(!tx_skb)) { netdev_err(bp->dev, "BUG! empty skb!\n"); return 0; } @@ -1338,16 +1333,16 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) #if defined(DEBUG) && defined(VERBOSE_DEBUG) netdev_vdbg(bp->dev, - "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n", - queue_index, skb->len, skb->head, skb->data, - skb_tail_pointer(skb), skb_end_pointer(skb)); + "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n", + queue_index, skb->len, skb->head, skb->data, + skb_tail_pointer(skb), skb_end_pointer(skb)); print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1, skb->data, 16, true); #endif /* Count how many TX buffer descriptors are needed to send this * socket buffer: skb fragments of jumbo frames may need to be - * splitted into many buffer descriptors. + * split into many buffer descriptors. 
*/ count = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length); nr_frags = skb_shinfo(skb)->nr_frags; @@ -1398,8 +1393,8 @@ static void macb_init_rx_buffer_size(struct macb *bp, size_t size) if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) { netdev_dbg(bp->dev, - "RX buffer must be multiple of %d bytes, expanding\n", - RX_BUFFER_MULTIPLE); + "RX buffer must be multiple of %d bytes, expanding\n", + RX_BUFFER_MULTIPLE); bp->rx_buffer_size = roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE); } @@ -1422,7 +1417,7 @@ static void gem_free_rx_buffers(struct macb *bp) for (i = 0; i < RX_RING_SIZE; i++) { skb = bp->rx_skbuff[i]; - if (skb == NULL) + if (!skb) continue; desc = &bp->rx_ring[i]; @@ -1478,10 +1473,10 @@ static int gem_alloc_rx_buffers(struct macb *bp) bp->rx_skbuff = kzalloc(size, GFP_KERNEL); if (!bp->rx_skbuff) return -ENOMEM; - else - netdev_dbg(bp->dev, - "Allocated %d RX struct sk_buff entries at %p\n", - RX_RING_SIZE, bp->rx_skbuff); + + netdev_dbg(bp->dev, + "Allocated %d RX struct sk_buff entries at %p\n", + RX_RING_SIZE, bp->rx_skbuff); return 0; } @@ -1494,10 +1489,10 @@ static int macb_alloc_rx_buffers(struct macb *bp) &bp->rx_buffers_dma, GFP_KERNEL); if (!bp->rx_buffers) return -ENOMEM; - else - netdev_dbg(bp->dev, - "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n", - size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers); + + netdev_dbg(bp->dev, + "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n", + size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers); return 0; } @@ -1588,8 +1583,7 @@ static void macb_reset_hw(struct macb *bp) struct macb_queue *queue; unsigned int q; - /* - * Disable RX and TX (XXX: Should we halt the transmission + /* Disable RX and TX (XXX: Should we halt the transmission * more gracefully?) */ macb_writel(bp, NCR, 0); @@ -1652,8 +1646,7 @@ static u32 macb_mdc_clk_div(struct macb *bp) return config; } -/* - * Get the DMA bus width field of the network configuration register that we +/* Get the DMA bus width field of the network configuration register that we * should program. We find the width from decoding the design configuration * register to find the maximum supported data bus width. */ @@ -1673,8 +1666,7 @@ static u32 macb_dbw(struct macb *bp) } } -/* - * Configure the receive DMA engine +/* Configure the receive DMA engine * - use the correct receive buffer size * - set best burst length for DMA operations * (if not supported by FIFO, it will fallback to default) @@ -1762,8 +1754,7 @@ static void macb_init_hw(struct macb *bp) macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE)); } -/* - * The hash address register is 64 bits long and takes up two +/* The hash address register is 64 bits long and takes up two * locations in the memory map. The least significant bits are stored * in EMAC_HSL and the most significant bits in EMAC_HSH. * @@ -1803,9 +1794,7 @@ static inline int hash_bit_value(int bitnr, __u8 *addr) return 0; } -/* - * Return the hash index value for the specified address. - */ +/* Return the hash index value for the specified address. */ static int hash_get_index(__u8 *addr) { int i, j, bitval; @@ -1821,9 +1810,7 @@ static int hash_get_index(__u8 *addr) return hash_index; } -/* - * Add multicast addresses to the internal multicast-hash table. - */ +/* Add multicast addresses to the internal multicast-hash table. 
*/ static void macb_sethashtable(struct net_device *dev) { struct netdev_hw_addr *ha; @@ -1831,7 +1818,8 @@ static void macb_sethashtable(struct net_device *dev) unsigned int bitnr; struct macb *bp = netdev_priv(dev); - mc_filter[0] = mc_filter[1] = 0; + mc_filter[0] = 0; + mc_filter[1] = 0; netdev_for_each_mc_addr(ha, dev) { bitnr = hash_get_index(ha->addr); @@ -1842,9 +1830,7 @@ static void macb_sethashtable(struct net_device *dev) macb_or_gem_writel(bp, HRT, mc_filter[1]); } -/* - * Enable/Disable promiscuous and multicast modes. - */ +/* Enable/Disable promiscuous and multicast modes. */ static void macb_set_rx_mode(struct net_device *dev) { unsigned long cfg; @@ -2161,9 +2147,8 @@ static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs, if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) regs_buff[12] = macb_or_gem_readl(bp, USRIO); - if (macb_is_gem(bp)) { + if (macb_is_gem(bp)) regs_buff[13] = gem_readl(bp, DMACFG); - } } static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) @@ -2286,11 +2271,11 @@ static const struct net_device_ops macb_netdev_ops = { .ndo_set_features = macb_set_features, }; -/* - * Configure peripheral capabilities according to device tree +/* Configure peripheral capabilities according to device tree * and integration options used */ -static void macb_configure_caps(struct macb *bp, const struct macb_config *dt_conf) +static void macb_configure_caps(struct macb *bp, + const struct macb_config *dt_conf) { u32 dcfg; @@ -2988,7 +2973,7 @@ static int macb_probe(struct platform_device *pdev) mac = of_get_mac_address(np); if (mac) - memcpy(bp->dev->dev_addr, mac, ETH_ALEN); + ether_addr_copy(bp->dev->dev_addr, mac); else macb_get_hwaddr(bp); @@ -2996,6 +2981,7 @@ static int macb_probe(struct platform_device *pdev) phy_node = of_get_next_available_child(np, NULL); if (phy_node) { int gpio = of_get_named_gpio(phy_node, "reset-gpios", 0); + if (gpio_is_valid(gpio)) { bp->reset_gpio = gpio_to_desc(gpio); gpiod_direction_output(bp->reset_gpio, 1); diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h index b2160d1b9c71..5c1624147778 100644 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h @@ -157,6 +157,7 @@ enum ravb_reg { TIC = 0x0378, TIS = 0x037C, ISS = 0x0380, + CIE = 0x0384, /* R-Car Gen3 only */ GCCR = 0x0390, GMTT = 0x0394, GPTC = 0x0398, @@ -170,6 +171,15 @@ enum ravb_reg { GCT0 = 0x03B8, GCT1 = 0x03BC, GCT2 = 0x03C0, + GIE = 0x03CC, /* R-Car Gen3 only */ + GID = 0x03D0, /* R-Car Gen3 only */ + DIL = 0x0440, /* R-Car Gen3 only */ + RIE0 = 0x0460, /* R-Car Gen3 only */ + RID0 = 0x0464, /* R-Car Gen3 only */ + RIE2 = 0x0470, /* R-Car Gen3 only */ + RID2 = 0x0474, /* R-Car Gen3 only */ + TIE = 0x0478, /* R-Car Gen3 only */ + TID = 0x047c, /* R-Car Gen3 only */ /* E-MAC registers */ ECMR = 0x0500, @@ -556,6 +566,16 @@ enum ISS_BIT { ISS_DPS15 = 0x80000000, }; +/* CIE (R-Car Gen3 only) */ +enum CIE_BIT { + CIE_CRIE = 0x00000001, + CIE_CTIE = 0x00000100, + CIE_RQFM = 0x00010000, + CIE_CL0M = 0x00020000, + CIE_RFWL = 0x00040000, + CIE_RFFL = 0x00080000, +}; + /* GCCR */ enum GCCR_BIT { GCCR_TCR = 0x00000003, @@ -592,6 +612,188 @@ enum GIS_BIT { GIS_PTMF = 0x00000004, }; +/* GIE (R-Car Gen3 only) */ +enum GIE_BIT { + GIE_PTCS = 0x00000001, + GIE_PTOS = 0x00000002, + GIE_PTMS0 = 0x00000004, + GIE_PTMS1 = 0x00000008, + GIE_PTMS2 = 0x00000010, + GIE_PTMS3 = 0x00000020, + GIE_PTMS4 = 0x00000040, + GIE_PTMS5 = 0x00000080, + GIE_PTMS6 = 0x00000100, + GIE_PTMS7 = 0x00000200, 
+ GIE_ATCS0 = 0x00010000, + GIE_ATCS1 = 0x00020000, + GIE_ATCS2 = 0x00040000, + GIE_ATCS3 = 0x00080000, + GIE_ATCS4 = 0x00100000, + GIE_ATCS5 = 0x00200000, + GIE_ATCS6 = 0x00400000, + GIE_ATCS7 = 0x00800000, + GIE_ATCS8 = 0x01000000, + GIE_ATCS9 = 0x02000000, + GIE_ATCS10 = 0x04000000, + GIE_ATCS11 = 0x08000000, + GIE_ATCS12 = 0x10000000, + GIE_ATCS13 = 0x20000000, + GIE_ATCS14 = 0x40000000, + GIE_ATCS15 = 0x80000000, +}; + +/* GID (R-Car Gen3 only) */ +enum GID_BIT { + GID_PTCD = 0x00000001, + GID_PTOD = 0x00000002, + GID_PTMD0 = 0x00000004, + GID_PTMD1 = 0x00000008, + GID_PTMD2 = 0x00000010, + GID_PTMD3 = 0x00000020, + GID_PTMD4 = 0x00000040, + GID_PTMD5 = 0x00000080, + GID_PTMD6 = 0x00000100, + GID_PTMD7 = 0x00000200, + GID_ATCD0 = 0x00010000, + GID_ATCD1 = 0x00020000, + GID_ATCD2 = 0x00040000, + GID_ATCD3 = 0x00080000, + GID_ATCD4 = 0x00100000, + GID_ATCD5 = 0x00200000, + GID_ATCD6 = 0x00400000, + GID_ATCD7 = 0x00800000, + GID_ATCD8 = 0x01000000, + GID_ATCD9 = 0x02000000, + GID_ATCD10 = 0x04000000, + GID_ATCD11 = 0x08000000, + GID_ATCD12 = 0x10000000, + GID_ATCD13 = 0x20000000, + GID_ATCD14 = 0x40000000, + GID_ATCD15 = 0x80000000, +}; + +/* RIE0 (R-Car Gen3 only) */ +enum RIE0_BIT { + RIE0_FRS0 = 0x00000001, + RIE0_FRS1 = 0x00000002, + RIE0_FRS2 = 0x00000004, + RIE0_FRS3 = 0x00000008, + RIE0_FRS4 = 0x00000010, + RIE0_FRS5 = 0x00000020, + RIE0_FRS6 = 0x00000040, + RIE0_FRS7 = 0x00000080, + RIE0_FRS8 = 0x00000100, + RIE0_FRS9 = 0x00000200, + RIE0_FRS10 = 0x00000400, + RIE0_FRS11 = 0x00000800, + RIE0_FRS12 = 0x00001000, + RIE0_FRS13 = 0x00002000, + RIE0_FRS14 = 0x00004000, + RIE0_FRS15 = 0x00008000, + RIE0_FRS16 = 0x00010000, + RIE0_FRS17 = 0x00020000, +}; + +/* RID0 (R-Car Gen3 only) */ +enum RID0_BIT { + RID0_FRD0 = 0x00000001, + RID0_FRD1 = 0x00000002, + RID0_FRD2 = 0x00000004, + RID0_FRD3 = 0x00000008, + RID0_FRD4 = 0x00000010, + RID0_FRD5 = 0x00000020, + RID0_FRD6 = 0x00000040, + RID0_FRD7 = 0x00000080, + RID0_FRD8 = 0x00000100, + RID0_FRD9 = 0x00000200, + RID0_FRD10 = 0x00000400, + RID0_FRD11 = 0x00000800, + RID0_FRD12 = 0x00001000, + RID0_FRD13 = 0x00002000, + RID0_FRD14 = 0x00004000, + RID0_FRD15 = 0x00008000, + RID0_FRD16 = 0x00010000, + RID0_FRD17 = 0x00020000, +}; + +/* RIE2 (R-Car Gen3 only) */ +enum RIE2_BIT { + RIE2_QFS0 = 0x00000001, + RIE2_QFS1 = 0x00000002, + RIE2_QFS2 = 0x00000004, + RIE2_QFS3 = 0x00000008, + RIE2_QFS4 = 0x00000010, + RIE2_QFS5 = 0x00000020, + RIE2_QFS6 = 0x00000040, + RIE2_QFS7 = 0x00000080, + RIE2_QFS8 = 0x00000100, + RIE2_QFS9 = 0x00000200, + RIE2_QFS10 = 0x00000400, + RIE2_QFS11 = 0x00000800, + RIE2_QFS12 = 0x00001000, + RIE2_QFS13 = 0x00002000, + RIE2_QFS14 = 0x00004000, + RIE2_QFS15 = 0x00008000, + RIE2_QFS16 = 0x00010000, + RIE2_QFS17 = 0x00020000, + RIE2_RFFS = 0x80000000, +}; + +/* RID2 (R-Car Gen3 only) */ +enum RID2_BIT { + RID2_QFD0 = 0x00000001, + RID2_QFD1 = 0x00000002, + RID2_QFD2 = 0x00000004, + RID2_QFD3 = 0x00000008, + RID2_QFD4 = 0x00000010, + RID2_QFD5 = 0x00000020, + RID2_QFD6 = 0x00000040, + RID2_QFD7 = 0x00000080, + RID2_QFD8 = 0x00000100, + RID2_QFD9 = 0x00000200, + RID2_QFD10 = 0x00000400, + RID2_QFD11 = 0x00000800, + RID2_QFD12 = 0x00001000, + RID2_QFD13 = 0x00002000, + RID2_QFD14 = 0x00004000, + RID2_QFD15 = 0x00008000, + RID2_QFD16 = 0x00010000, + RID2_QFD17 = 0x00020000, + RID2_RFFD = 0x80000000, +}; + +/* TIE (R-Car Gen3 only) */ +enum TIE_BIT { + TIE_FTS0 = 0x00000001, + TIE_FTS1 = 0x00000002, + TIE_FTS2 = 0x00000004, + TIE_FTS3 = 0x00000008, + TIE_TFUS = 0x00000100, + TIE_TFWS = 0x00000200, + TIE_MFUS = 0x00000400, + 
TIE_MFWS = 0x00000800, + TIE_TDPS0 = 0x00010000, + TIE_TDPS1 = 0x00020000, + TIE_TDPS2 = 0x00040000, + TIE_TDPS3 = 0x00080000, +}; + +/* TID (R-Car Gen3 only) */ +enum TID_BIT { + TID_FTD0 = 0x00000001, + TID_FTD1 = 0x00000002, + TID_FTD2 = 0x00000004, + TID_FTD3 = 0x00000008, + TID_TFUD = 0x00000100, + TID_TFWD = 0x00000200, + TID_MFUD = 0x00000400, + TID_MFWD = 0x00000800, + TID_TDPD0 = 0x00010000, + TID_TDPD1 = 0x00020000, + TID_TDPD2 = 0x00040000, + TID_TDPD3 = 0x00080000, +}; + /* ECMR */ enum ECMR_BIT { ECMR_PRM = 0x00000001, @@ -817,6 +1019,8 @@ struct ravb_private { int duplex; int emac_irq; enum ravb_chip_id chip_id; + int rx_irqs[NUM_RX_QUEUE]; + int tx_irqs[NUM_TX_QUEUE]; unsigned no_avb_link:1; unsigned avb_link_active_low:1; diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 087e14a3fba7..4b71951e185d 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -42,6 +42,16 @@ NETIF_MSG_RX_ERR | \ NETIF_MSG_TX_ERR) +static const char *ravb_rx_irqs[NUM_RX_QUEUE] = { + "ch0", /* RAVB_BE */ + "ch1", /* RAVB_NC */ +}; + +static const char *ravb_tx_irqs[NUM_TX_QUEUE] = { + "ch18", /* RAVB_BE */ + "ch19", /* RAVB_NC */ +}; + void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear, u32 set) { @@ -365,6 +375,7 @@ static void ravb_emac_init(struct net_device *ndev) /* Device init function for Ethernet AVB */ static int ravb_dmac_init(struct net_device *ndev) { + struct ravb_private *priv = netdev_priv(ndev); int error; /* Set CONFIG mode */ @@ -401,6 +412,12 @@ static int ravb_dmac_init(struct net_device *ndev) ravb_write(ndev, TCCR_TFEN, TCCR); /* Interrupt init: */ + if (priv->chip_id == RCAR_GEN3) { + /* Clear DIL.DPLx */ + ravb_write(ndev, 0, DIL); + /* Set queue specific interrupt */ + ravb_write(ndev, CIE_CRIE | CIE_CTIE | CIE_CL0M, CIE); + } /* Frame receive */ ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0); /* Disable FIFO full warning */ @@ -643,7 +660,7 @@ static int ravb_stop_dma(struct net_device *ndev) } /* E-MAC interrupt handler */ -static void ravb_emac_interrupt(struct net_device *ndev) +static void ravb_emac_interrupt_unlocked(struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); u32 ecsr, psr; @@ -669,6 +686,18 @@ static void ravb_emac_interrupt(struct net_device *ndev) } } +static irqreturn_t ravb_emac_interrupt(int irq, void *dev_id) +{ + struct net_device *ndev = dev_id; + struct ravb_private *priv = netdev_priv(ndev); + + spin_lock(&priv->lock); + ravb_emac_interrupt_unlocked(ndev); + mmiowb(); + spin_unlock(&priv->lock); + return IRQ_HANDLED; +} + /* Error interrupt handler */ static void ravb_error_interrupt(struct net_device *ndev) { @@ -695,6 +724,50 @@ static void ravb_error_interrupt(struct net_device *ndev) } } +static bool ravb_queue_interrupt(struct net_device *ndev, int q) +{ + struct ravb_private *priv = netdev_priv(ndev); + u32 ris0 = ravb_read(ndev, RIS0); + u32 ric0 = ravb_read(ndev, RIC0); + u32 tis = ravb_read(ndev, TIS); + u32 tic = ravb_read(ndev, TIC); + + if (((ris0 & ric0) & BIT(q)) || ((tis & tic) & BIT(q))) { + if (napi_schedule_prep(&priv->napi[q])) { + /* Mask RX and TX interrupts */ + if (priv->chip_id == RCAR_GEN2) { + ravb_write(ndev, ric0 & ~BIT(q), RIC0); + ravb_write(ndev, tic & ~BIT(q), TIC); + } else { + ravb_write(ndev, BIT(q), RID0); + ravb_write(ndev, BIT(q), TID); + } + __napi_schedule(&priv->napi[q]); + } else { + netdev_warn(ndev, + "ignoring interrupt, rx status 0x%08x, rx mask 0x%08x,\n", + ris0, 
ric0); + netdev_warn(ndev, + " tx status 0x%08x, tx mask 0x%08x.\n", + tis, tic); + } + return true; + } + return false; +} + +static bool ravb_timestamp_interrupt(struct net_device *ndev) +{ + u32 tis = ravb_read(ndev, TIS); + + if (tis & TIS_TFUF) { + ravb_write(ndev, ~TIS_TFUF, TIS); + ravb_get_tx_tstamp(ndev); + return true; + } + return false; +} + static irqreturn_t ravb_interrupt(int irq, void *dev_id) { struct net_device *ndev = dev_id; @@ -708,46 +781,22 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id) /* Received and transmitted interrupts */ if (iss & (ISS_FRS | ISS_FTS | ISS_TFUS)) { - u32 ris0 = ravb_read(ndev, RIS0); - u32 ric0 = ravb_read(ndev, RIC0); - u32 tis = ravb_read(ndev, TIS); - u32 tic = ravb_read(ndev, TIC); int q; /* Timestamp updated */ - if (tis & TIS_TFUF) { - ravb_write(ndev, ~TIS_TFUF, TIS); - ravb_get_tx_tstamp(ndev); + if (ravb_timestamp_interrupt(ndev)) result = IRQ_HANDLED; - } /* Network control and best effort queue RX/TX */ for (q = RAVB_NC; q >= RAVB_BE; q--) { - if (((ris0 & ric0) & BIT(q)) || - ((tis & tic) & BIT(q))) { - if (napi_schedule_prep(&priv->napi[q])) { - /* Mask RX and TX interrupts */ - ric0 &= ~BIT(q); - tic &= ~BIT(q); - ravb_write(ndev, ric0, RIC0); - ravb_write(ndev, tic, TIC); - __napi_schedule(&priv->napi[q]); - } else { - netdev_warn(ndev, - "ignoring interrupt, rx status 0x%08x, rx mask 0x%08x,\n", - ris0, ric0); - netdev_warn(ndev, - " tx status 0x%08x, tx mask 0x%08x.\n", - tis, tic); - } + if (ravb_queue_interrupt(ndev, q)) result = IRQ_HANDLED; - } } } /* E-MAC status summary */ if (iss & ISS_MS) { - ravb_emac_interrupt(ndev); + ravb_emac_interrupt_unlocked(ndev); result = IRQ_HANDLED; } @@ -757,6 +806,7 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id) result = IRQ_HANDLED; } + /* gPTP interrupt status summary */ if ((iss & ISS_CGIS) && ravb_ptp_interrupt(ndev) == IRQ_HANDLED) result = IRQ_HANDLED; @@ -765,6 +815,64 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id) return result; } +/* Timestamp/Error/gPTP interrupt handler */ +static irqreturn_t ravb_multi_interrupt(int irq, void *dev_id) +{ + struct net_device *ndev = dev_id; + struct ravb_private *priv = netdev_priv(ndev); + irqreturn_t result = IRQ_NONE; + u32 iss; + + spin_lock(&priv->lock); + /* Get interrupt status */ + iss = ravb_read(ndev, ISS); + + /* Timestamp updated */ + if ((iss & ISS_TFUS) && ravb_timestamp_interrupt(ndev)) + result = IRQ_HANDLED; + + /* Error status summary */ + if (iss & ISS_ES) { + ravb_error_interrupt(ndev); + result = IRQ_HANDLED; + } + + /* gPTP interrupt status summary */ + if ((iss & ISS_CGIS) && ravb_ptp_interrupt(ndev) == IRQ_HANDLED) + result = IRQ_HANDLED; + + mmiowb(); + spin_unlock(&priv->lock); + return result; +} + +static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q) +{ + struct net_device *ndev = dev_id; + struct ravb_private *priv = netdev_priv(ndev); + irqreturn_t result = IRQ_NONE; + + spin_lock(&priv->lock); + + /* Network control/Best effort queue RX/TX */ + if (ravb_queue_interrupt(ndev, q)) + result = IRQ_HANDLED; + + mmiowb(); + spin_unlock(&priv->lock); + return result; +} + +static irqreturn_t ravb_be_interrupt(int irq, void *dev_id) +{ + return ravb_dma_interrupt(irq, dev_id, RAVB_BE); +} + +static irqreturn_t ravb_nc_interrupt(int irq, void *dev_id) +{ + return ravb_dma_interrupt(irq, dev_id, RAVB_NC); +} + static int ravb_poll(struct napi_struct *napi, int budget) { struct net_device *ndev = napi->dev; @@ -804,8 +912,13 @@ static int ravb_poll(struct napi_struct 
*napi, int budget) /* Re-enable RX/TX interrupts */ spin_lock_irqsave(&priv->lock, flags); - ravb_modify(ndev, RIC0, mask, mask); - ravb_modify(ndev, TIC, mask, mask); + if (priv->chip_id == RCAR_GEN2) { + ravb_modify(ndev, RIC0, mask, mask); + ravb_modify(ndev, TIC, mask, mask); + } else { + ravb_write(ndev, mask, RIE0); + ravb_write(ndev, mask, TIE); + } mmiowb(); spin_unlock_irqrestore(&priv->lock, flags); @@ -1208,35 +1321,72 @@ static const struct ethtool_ops ravb_ethtool_ops = { .get_ts_info = ravb_get_ts_info, }; +static inline int ravb_hook_irq(unsigned int irq, irq_handler_t handler, + struct net_device *ndev, struct device *dev, + const char *ch) +{ + char *name; + int error; + + name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", ndev->name, ch); + if (!name) + return -ENOMEM; + error = request_irq(irq, handler, 0, name, ndev); + if (error) + netdev_err(ndev, "cannot request IRQ %s\n", name); + + return error; +} + /* Network device open function for Ethernet AVB */ static int ravb_open(struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); + struct platform_device *pdev = priv->pdev; + struct device *dev = &pdev->dev; int error; napi_enable(&priv->napi[RAVB_BE]); napi_enable(&priv->napi[RAVB_NC]); - error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED, ndev->name, - ndev); - if (error) { - netdev_err(ndev, "cannot request IRQ\n"); - goto out_napi_off; - } - - if (priv->chip_id == RCAR_GEN3) { - error = request_irq(priv->emac_irq, ravb_interrupt, - IRQF_SHARED, ndev->name, ndev); + if (priv->chip_id == RCAR_GEN2) { + error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED, + ndev->name, ndev); if (error) { netdev_err(ndev, "cannot request IRQ\n"); - goto out_free_irq; + goto out_napi_off; } + } else { + error = ravb_hook_irq(ndev->irq, ravb_multi_interrupt, ndev, + dev, "ch22:multi"); + if (error) + goto out_napi_off; + error = ravb_hook_irq(priv->emac_irq, ravb_emac_interrupt, ndev, + dev, "ch24:emac"); + if (error) + goto out_free_irq; + error = ravb_hook_irq(priv->rx_irqs[RAVB_BE], ravb_be_interrupt, + ndev, dev, "ch0:rx_be"); + if (error) + goto out_free_irq_emac; + error = ravb_hook_irq(priv->tx_irqs[RAVB_BE], ravb_be_interrupt, + ndev, dev, "ch18:tx_be"); + if (error) + goto out_free_irq_be_rx; + error = ravb_hook_irq(priv->rx_irqs[RAVB_NC], ravb_nc_interrupt, + ndev, dev, "ch1:rx_nc"); + if (error) + goto out_free_irq_be_tx; + error = ravb_hook_irq(priv->tx_irqs[RAVB_NC], ravb_nc_interrupt, + ndev, dev, "ch19:tx_nc"); + if (error) + goto out_free_irq_nc_rx; } /* Device init */ error = ravb_dmac_init(ndev); if (error) - goto out_free_irq2; + goto out_free_irq_nc_tx; ravb_emac_init(ndev); /* Initialise PTP Clock driver */ @@ -1256,9 +1406,18 @@ out_ptp_stop: /* Stop PTP Clock driver */ if (priv->chip_id == RCAR_GEN2) ravb_ptp_stop(ndev); -out_free_irq2: - if (priv->chip_id == RCAR_GEN3) - free_irq(priv->emac_irq, ndev); +out_free_irq_nc_tx: + if (priv->chip_id == RCAR_GEN2) + goto out_free_irq; + free_irq(priv->tx_irqs[RAVB_NC], ndev); +out_free_irq_nc_rx: + free_irq(priv->rx_irqs[RAVB_NC], ndev); +out_free_irq_be_tx: + free_irq(priv->tx_irqs[RAVB_BE], ndev); +out_free_irq_be_rx: + free_irq(priv->rx_irqs[RAVB_BE], ndev); +out_free_irq_emac: + free_irq(priv->emac_irq, ndev); out_free_irq: free_irq(ndev->irq, ndev); out_napi_off: @@ -1713,6 +1872,7 @@ static int ravb_probe(struct platform_device *pdev) struct net_device *ndev; int error, irq, q; struct resource *res; + int i; if (!np) { dev_err(&pdev->dev, @@ -1782,6 +1942,22 @@ static int 
ravb_probe(struct platform_device *pdev) goto out_release; } priv->emac_irq = irq; + for (i = 0; i < NUM_RX_QUEUE; i++) { + irq = platform_get_irq_byname(pdev, ravb_rx_irqs[i]); + if (irq < 0) { + error = irq; + goto out_release; + } + priv->rx_irqs[i] = irq; + } + for (i = 0; i < NUM_TX_QUEUE; i++) { + irq = platform_get_irq_byname(pdev, ravb_tx_irqs[i]); + if (irq < 0) { + error = irq; + goto out_release; + } + priv->tx_irqs[i] = irq; + } } priv->chip_id = chip_id; diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c index 57992ccc4657..f1b2cbb336e8 100644 --- a/drivers/net/ethernet/renesas/ravb_ptp.c +++ b/drivers/net/ethernet/renesas/ravb_ptp.c @@ -194,7 +194,12 @@ static int ravb_ptp_extts(struct ptp_clock_info *ptp, priv->ptp.extts[req->index] = on; spin_lock_irqsave(&priv->lock, flags); - ravb_modify(ndev, GIC, GIC_PTCE, on ? GIC_PTCE : 0); + if (priv->chip_id == RCAR_GEN2) + ravb_modify(ndev, GIC, GIC_PTCE, on ? GIC_PTCE : 0); + else if (on) + ravb_write(ndev, GIE_PTCS, GIE); + else + ravb_write(ndev, GID_PTCD, GID); mmiowb(); spin_unlock_irqrestore(&priv->lock, flags); @@ -241,7 +246,10 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp, error = ravb_ptp_update_compare(priv, (u32)start_ns); if (!error) { /* Unmask interrupt */ - ravb_modify(ndev, GIC, GIC_PTME, GIC_PTME); + if (priv->chip_id == RCAR_GEN2) + ravb_modify(ndev, GIC, GIC_PTME, GIC_PTME); + else + ravb_write(ndev, GIE_PTMS0, GIE); } } else { spin_lock_irqsave(&priv->lock, flags); @@ -250,7 +258,10 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp, perout->period = 0; /* Mask interrupt */ - ravb_modify(ndev, GIC, GIC_PTME, 0); + if (priv->chip_id == RCAR_GEN2) + ravb_modify(ndev, GIC, GIC_PTME, 0); + else + ravb_write(ndev, GID_PTMD0, GID); } mmiowb(); spin_unlock_irqrestore(&priv->lock, flags); diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig index a2c227bfb687..e070e1222733 100644 --- a/drivers/net/irda/Kconfig +++ b/drivers/net/irda/Kconfig @@ -394,12 +394,5 @@ config MCS_FIR To compile it as a module, choose M here: the module will be called mcs7780. -config SH_IRDA - tristate "SuperH IrDA driver" - depends on IRDA - depends on (ARCH_SHMOBILE || COMPILE_TEST) && HAS_IOMEM - help - Say Y here if your want to enable SuperH IrDA devices. - endmenu diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile index be8ab5b9a4a2..4c344433dae5 100644 --- a/drivers/net/irda/Makefile +++ b/drivers/net/irda/Makefile @@ -19,7 +19,6 @@ obj-$(CONFIG_VIA_FIR) += via-ircc.o obj-$(CONFIG_PXA_FICP) += pxaficp_ir.o obj-$(CONFIG_MCS_FIR) += mcs7780.o obj-$(CONFIG_AU1000_FIR) += au1k_ir.o -obj-$(CONFIG_SH_IRDA) += sh_irda.o # SIR drivers obj-$(CONFIG_IRTTY_SIR) += irtty-sir.o sir-dev.o obj-$(CONFIG_BFIN_SIR) += bfin_sir.o diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c deleted file mode 100644 index c96b46b2c3a8..000000000000 --- a/drivers/net/irda/sh_irda.c +++ /dev/null @@ -1,875 +0,0 @@ -/* - * SuperH IrDA Driver - * - * Copyright (C) 2010 Renesas Solutions Corp. - * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> - * - * Based on sh_sir.c - * Copyright (C) 2009 Renesas Solutions Corp. - * Copyright 2006-2009 Analog Devices Inc. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -/* - * CAUTION - * - * This driver is very simple. 
- * So, it doesn't have below support now - * - MIR/FIR support - * - DMA transfer support - * - FIFO mode support - */ -#include <linux/io.h> -#include <linux/interrupt.h> -#include <linux/module.h> -#include <linux/platform_device.h> -#include <linux/pm_runtime.h> -#include <linux/clk.h> -#include <net/irda/wrapper.h> -#include <net/irda/irda_device.h> - -#define DRIVER_NAME "sh_irda" - -#define __IRDARAM_LEN 0x1039 - -#define IRTMR 0x1F00 /* Transfer mode */ -#define IRCFR 0x1F02 /* Configuration */ -#define IRCTR 0x1F04 /* IR control */ -#define IRTFLR 0x1F20 /* Transmit frame length */ -#define IRTCTR 0x1F22 /* Transmit control */ -#define IRRFLR 0x1F40 /* Receive frame length */ -#define IRRCTR 0x1F42 /* Receive control */ -#define SIRISR 0x1F60 /* SIR-UART mode interrupt source */ -#define SIRIMR 0x1F62 /* SIR-UART mode interrupt mask */ -#define SIRICR 0x1F64 /* SIR-UART mode interrupt clear */ -#define SIRBCR 0x1F68 /* SIR-UART mode baud rate count */ -#define MFIRISR 0x1F70 /* MIR/FIR mode interrupt source */ -#define MFIRIMR 0x1F72 /* MIR/FIR mode interrupt mask */ -#define MFIRICR 0x1F74 /* MIR/FIR mode interrupt clear */ -#define CRCCTR 0x1F80 /* CRC engine control */ -#define CRCIR 0x1F86 /* CRC engine input data */ -#define CRCCR 0x1F8A /* CRC engine calculation */ -#define CRCOR 0x1F8E /* CRC engine output data */ -#define FIFOCP 0x1FC0 /* FIFO current pointer */ -#define FIFOFP 0x1FC2 /* FIFO follow pointer */ -#define FIFORSMSK 0x1FC4 /* FIFO receive status mask */ -#define FIFORSOR 0x1FC6 /* FIFO receive status OR */ -#define FIFOSEL 0x1FC8 /* FIFO select */ -#define FIFORS 0x1FCA /* FIFO receive status */ -#define FIFORFL 0x1FCC /* FIFO receive frame length */ -#define FIFORAMCP 0x1FCE /* FIFO RAM current pointer */ -#define FIFORAMFP 0x1FD0 /* FIFO RAM follow pointer */ -#define BIFCTL 0x1FD2 /* BUS interface control */ -#define IRDARAM 0x0000 /* IrDA buffer RAM */ -#define IRDARAM_LEN __IRDARAM_LEN /* - 8/16/32 (read-only for 32) */ - -/* IRTMR */ -#define TMD_MASK (0x3 << 14) /* Transfer Mode */ -#define TMD_SIR (0x0 << 14) -#define TMD_MIR (0x3 << 14) -#define TMD_FIR (0x2 << 14) - -#define FIFORIM (1 << 8) /* FIFO receive interrupt mask */ -#define MIM (1 << 4) /* MIR/FIR Interrupt Mask */ -#define SIM (1 << 0) /* SIR Interrupt Mask */ -#define xIM_MASK (FIFORIM | MIM | SIM) - -/* IRCFR */ -#define RTO_SHIFT 8 /* shift for Receive Timeout */ -#define RTO (0x3 << RTO_SHIFT) - -/* IRTCTR */ -#define ARMOD (1 << 15) /* Auto-Receive Mode */ -#define TE (1 << 0) /* Transmit Enable */ - -/* IRRFLR */ -#define RFL_MASK (0x1FFF) /* mask for Receive Frame Length */ - -/* IRRCTR */ -#define RE (1 << 0) /* Receive Enable */ - -/* - * SIRISR, SIRIMR, SIRICR, - * MFIRISR, MFIRIMR, MFIRICR - */ -#define FRE (1 << 15) /* Frame Receive End */ -#define TROV (1 << 11) /* Transfer Area Overflow */ -#define xIR_9 (1 << 9) -#define TOT xIR_9 /* for SIR Timeout */ -#define ABTD xIR_9 /* for MIR/FIR Abort Detection */ -#define xIR_8 (1 << 8) -#define FER xIR_8 /* for SIR Framing Error */ -#define CRCER xIR_8 /* for MIR/FIR CRC error */ -#define FTE (1 << 7) /* Frame Transmit End */ -#define xIR_MASK (FRE | TROV | xIR_9 | xIR_8 | FTE) - -/* SIRBCR */ -#define BRC_MASK (0x3F) /* mask for Baud Rate Count */ - -/* CRCCTR */ -#define CRC_RST (1 << 15) /* CRC Engine Reset */ -#define CRC_CT_MASK 0x0FFF /* mask for CRC Engine Input Data Count */ - -/* CRCIR */ -#define CRC_IN_MASK 0x0FFF /* mask for CRC Engine Input Data */ - 
-/************************************************************************ - - - enum / structure - - -************************************************************************/ -enum sh_irda_mode { - SH_IRDA_NONE = 0, - SH_IRDA_SIR, - SH_IRDA_MIR, - SH_IRDA_FIR, -}; - -struct sh_irda_self; -struct sh_irda_xir_func { - int (*xir_fre) (struct sh_irda_self *self); - int (*xir_trov) (struct sh_irda_self *self); - int (*xir_9) (struct sh_irda_self *self); - int (*xir_8) (struct sh_irda_self *self); - int (*xir_fte) (struct sh_irda_self *self); -}; - -struct sh_irda_self { - void __iomem *membase; - unsigned int irq; - struct platform_device *pdev; - - struct net_device *ndev; - - struct irlap_cb *irlap; - struct qos_info qos; - - iobuff_t tx_buff; - iobuff_t rx_buff; - - enum sh_irda_mode mode; - spinlock_t lock; - - struct sh_irda_xir_func *xir_func; -}; - -/************************************************************************ - - - common function - - -************************************************************************/ -static void sh_irda_write(struct sh_irda_self *self, u32 offset, u16 data) -{ - unsigned long flags; - - spin_lock_irqsave(&self->lock, flags); - iowrite16(data, self->membase + offset); - spin_unlock_irqrestore(&self->lock, flags); -} - -static u16 sh_irda_read(struct sh_irda_self *self, u32 offset) -{ - unsigned long flags; - u16 ret; - - spin_lock_irqsave(&self->lock, flags); - ret = ioread16(self->membase + offset); - spin_unlock_irqrestore(&self->lock, flags); - - return ret; -} - -static void sh_irda_update_bits(struct sh_irda_self *self, u32 offset, - u16 mask, u16 data) -{ - unsigned long flags; - u16 old, new; - - spin_lock_irqsave(&self->lock, flags); - old = ioread16(self->membase + offset); - new = (old & ~mask) | data; - if (old != new) - iowrite16(data, self->membase + offset); - spin_unlock_irqrestore(&self->lock, flags); -} - -/************************************************************************ - - - mode function - - -************************************************************************/ -/*===================================== - * - * common - * - *=====================================*/ -static void sh_irda_rcv_ctrl(struct sh_irda_self *self, int enable) -{ - struct device *dev = &self->ndev->dev; - - sh_irda_update_bits(self, IRRCTR, RE, enable ? RE : 0); - dev_dbg(dev, "recv %s\n", enable ? 
"enable" : "disable"); -} - -static int sh_irda_set_timeout(struct sh_irda_self *self, int interval) -{ - struct device *dev = &self->ndev->dev; - - if (SH_IRDA_SIR != self->mode) - interval = 0; - - if (interval < 0 || interval > 2) { - dev_err(dev, "unsupported timeout interval\n"); - return -EINVAL; - } - - sh_irda_update_bits(self, IRCFR, RTO, interval << RTO_SHIFT); - return 0; -} - -static int sh_irda_set_baudrate(struct sh_irda_self *self, int baudrate) -{ - struct device *dev = &self->ndev->dev; - u16 val; - - if (baudrate < 0) - return 0; - - if (SH_IRDA_SIR != self->mode) { - dev_err(dev, "it is not SIR mode\n"); - return -EINVAL; - } - - /* - * Baud rate (bits/s) = - * (48 MHz / 26) / (baud rate counter value + 1) x 16 - */ - val = (48000000 / 26 / 16 / baudrate) - 1; - dev_dbg(dev, "baudrate = %d, val = 0x%02x\n", baudrate, val); - - sh_irda_update_bits(self, SIRBCR, BRC_MASK, val); - - return 0; -} - -static int sh_irda_get_rcv_length(struct sh_irda_self *self) -{ - return RFL_MASK & sh_irda_read(self, IRRFLR); -} - -/*===================================== - * - * NONE MODE - * - *=====================================*/ -static int sh_irda_xir_fre(struct sh_irda_self *self) -{ - struct device *dev = &self->ndev->dev; - dev_err(dev, "none mode: frame recv\n"); - return 0; -} - -static int sh_irda_xir_trov(struct sh_irda_self *self) -{ - struct device *dev = &self->ndev->dev; - dev_err(dev, "none mode: buffer ram over\n"); - return 0; -} - -static int sh_irda_xir_9(struct sh_irda_self *self) -{ - struct device *dev = &self->ndev->dev; - dev_err(dev, "none mode: time over\n"); - return 0; -} - -static int sh_irda_xir_8(struct sh_irda_self *self) -{ - struct device *dev = &self->ndev->dev; - dev_err(dev, "none mode: framing error\n"); - return 0; -} - -static int sh_irda_xir_fte(struct sh_irda_self *self) -{ - struct device *dev = &self->ndev->dev; - dev_err(dev, "none mode: frame transmit end\n"); - return 0; -} - -static struct sh_irda_xir_func sh_irda_xir_func = { - .xir_fre = sh_irda_xir_fre, - .xir_trov = sh_irda_xir_trov, - .xir_9 = sh_irda_xir_9, - .xir_8 = sh_irda_xir_8, - .xir_fte = sh_irda_xir_fte, -}; - -/*===================================== - * - * MIR/FIR MODE - * - * MIR/FIR are not supported now - *=====================================*/ -static struct sh_irda_xir_func sh_irda_mfir_func = { - .xir_fre = sh_irda_xir_fre, - .xir_trov = sh_irda_xir_trov, - .xir_9 = sh_irda_xir_9, - .xir_8 = sh_irda_xir_8, - .xir_fte = sh_irda_xir_fte, -}; - -/*===================================== - * - * SIR MODE - * - *=====================================*/ -static int sh_irda_sir_fre(struct sh_irda_self *self) -{ - struct device *dev = &self->ndev->dev; - u16 data16; - u8 *data = (u8 *)&data16; - int len = sh_irda_get_rcv_length(self); - int i, j; - - if (len > IRDARAM_LEN) - len = IRDARAM_LEN; - - dev_dbg(dev, "frame recv length = %d\n", len); - - for (i = 0; i < len; i++) { - j = i % 2; - if (!j) - data16 = sh_irda_read(self, IRDARAM + i); - - async_unwrap_char(self->ndev, &self->ndev->stats, - &self->rx_buff, data[j]); - } - self->ndev->last_rx = jiffies; - - sh_irda_rcv_ctrl(self, 1); - - return 0; -} - -static int sh_irda_sir_trov(struct sh_irda_self *self) -{ - struct device *dev = &self->ndev->dev; - - dev_err(dev, "buffer ram over\n"); - sh_irda_rcv_ctrl(self, 1); - return 0; -} - -static int sh_irda_sir_tot(struct sh_irda_self *self) -{ - struct device *dev = &self->ndev->dev; - - dev_err(dev, "time over\n"); - sh_irda_set_baudrate(self, 9600); - sh_irda_rcv_ctrl(self, 
1); - return 0; -} - -static int sh_irda_sir_fer(struct sh_irda_self *self) -{ - struct device *dev = &self->ndev->dev; - - dev_err(dev, "framing error\n"); - sh_irda_rcv_ctrl(self, 1); - return 0; -} - -static int sh_irda_sir_fte(struct sh_irda_self *self) -{ - struct device *dev = &self->ndev->dev; - - dev_dbg(dev, "frame transmit end\n"); - netif_wake_queue(self->ndev); - - return 0; -} - -static struct sh_irda_xir_func sh_irda_sir_func = { - .xir_fre = sh_irda_sir_fre, - .xir_trov = sh_irda_sir_trov, - .xir_9 = sh_irda_sir_tot, - .xir_8 = sh_irda_sir_fer, - .xir_fte = sh_irda_sir_fte, -}; - -static void sh_irda_set_mode(struct sh_irda_self *self, enum sh_irda_mode mode) -{ - struct device *dev = &self->ndev->dev; - struct sh_irda_xir_func *func; - const char *name; - u16 data; - - switch (mode) { - case SH_IRDA_SIR: - name = "SIR"; - data = TMD_SIR; - func = &sh_irda_sir_func; - break; - case SH_IRDA_MIR: - name = "MIR"; - data = TMD_MIR; - func = &sh_irda_mfir_func; - break; - case SH_IRDA_FIR: - name = "FIR"; - data = TMD_FIR; - func = &sh_irda_mfir_func; - break; - default: - name = "NONE"; - data = 0; - func = &sh_irda_xir_func; - break; - } - - self->mode = mode; - self->xir_func = func; - sh_irda_update_bits(self, IRTMR, TMD_MASK, data); - - dev_dbg(dev, "switch to %s mode", name); -} - -/************************************************************************ - - - irq function - - -************************************************************************/ -static void sh_irda_set_irq_mask(struct sh_irda_self *self) -{ - u16 tmr_hole; - u16 xir_reg; - - /* set all mask */ - sh_irda_update_bits(self, IRTMR, xIM_MASK, xIM_MASK); - sh_irda_update_bits(self, SIRIMR, xIR_MASK, xIR_MASK); - sh_irda_update_bits(self, MFIRIMR, xIR_MASK, xIR_MASK); - - /* clear irq */ - sh_irda_update_bits(self, SIRICR, xIR_MASK, xIR_MASK); - sh_irda_update_bits(self, MFIRICR, xIR_MASK, xIR_MASK); - - switch (self->mode) { - case SH_IRDA_SIR: - tmr_hole = SIM; - xir_reg = SIRIMR; - break; - case SH_IRDA_MIR: - case SH_IRDA_FIR: - tmr_hole = MIM; - xir_reg = MFIRIMR; - break; - default: - tmr_hole = 0; - xir_reg = 0; - break; - } - - /* open mask */ - if (xir_reg) { - sh_irda_update_bits(self, IRTMR, tmr_hole, 0); - sh_irda_update_bits(self, xir_reg, xIR_MASK, 0); - } -} - -static irqreturn_t sh_irda_irq(int irq, void *dev_id) -{ - struct sh_irda_self *self = dev_id; - struct sh_irda_xir_func *func = self->xir_func; - u16 isr = sh_irda_read(self, SIRISR); - - /* clear irq */ - sh_irda_write(self, SIRICR, isr); - - if (isr & FRE) - func->xir_fre(self); - if (isr & TROV) - func->xir_trov(self); - if (isr & xIR_9) - func->xir_9(self); - if (isr & xIR_8) - func->xir_8(self); - if (isr & FTE) - func->xir_fte(self); - - return IRQ_HANDLED; -} - -/************************************************************************ - - - CRC function - - -************************************************************************/ -static void sh_irda_crc_reset(struct sh_irda_self *self) -{ - sh_irda_write(self, CRCCTR, CRC_RST); -} - -static void sh_irda_crc_add(struct sh_irda_self *self, u16 data) -{ - sh_irda_write(self, CRCIR, data & CRC_IN_MASK); -} - -static u16 sh_irda_crc_cnt(struct sh_irda_self *self) -{ - return CRC_CT_MASK & sh_irda_read(self, CRCCTR); -} - -static u16 sh_irda_crc_out(struct sh_irda_self *self) -{ - return sh_irda_read(self, CRCOR); -} - -static int sh_irda_crc_init(struct sh_irda_self *self) -{ - struct device *dev = &self->ndev->dev; - int ret = -EIO; - u16 val; - - sh_irda_crc_reset(self); - - 
sh_irda_crc_add(self, 0xCC); - sh_irda_crc_add(self, 0xF5); - sh_irda_crc_add(self, 0xF1); - sh_irda_crc_add(self, 0xA7); - - val = sh_irda_crc_cnt(self); - if (4 != val) { - dev_err(dev, "CRC count error %x\n", val); - goto crc_init_out; - } - - val = sh_irda_crc_out(self); - if (0x51DF != val) { - dev_err(dev, "CRC result error%x\n", val); - goto crc_init_out; - } - - ret = 0; - -crc_init_out: - - sh_irda_crc_reset(self); - return ret; -} - -/************************************************************************ - - - iobuf function - - -************************************************************************/ -static void sh_irda_remove_iobuf(struct sh_irda_self *self) -{ - kfree(self->rx_buff.head); - - self->tx_buff.head = NULL; - self->tx_buff.data = NULL; - self->rx_buff.head = NULL; - self->rx_buff.data = NULL; -} - -static int sh_irda_init_iobuf(struct sh_irda_self *self, int rxsize, int txsize) -{ - if (self->rx_buff.head || - self->tx_buff.head) { - dev_err(&self->ndev->dev, "iobuff has already existed."); - return -EINVAL; - } - - /* rx_buff */ - self->rx_buff.head = kmalloc(rxsize, GFP_KERNEL); - if (!self->rx_buff.head) - return -ENOMEM; - - self->rx_buff.truesize = rxsize; - self->rx_buff.in_frame = FALSE; - self->rx_buff.state = OUTSIDE_FRAME; - self->rx_buff.data = self->rx_buff.head; - - /* tx_buff */ - self->tx_buff.head = self->membase + IRDARAM; - self->tx_buff.truesize = IRDARAM_LEN; - - return 0; -} - -/************************************************************************ - - - net_device_ops function - - -************************************************************************/ -static int sh_irda_hard_xmit(struct sk_buff *skb, struct net_device *ndev) -{ - struct sh_irda_self *self = netdev_priv(ndev); - struct device *dev = &self->ndev->dev; - int speed = irda_get_next_speed(skb); - int ret; - - dev_dbg(dev, "hard xmit\n"); - - netif_stop_queue(ndev); - sh_irda_rcv_ctrl(self, 0); - - ret = sh_irda_set_baudrate(self, speed); - if (ret < 0) - goto sh_irda_hard_xmit_end; - - self->tx_buff.len = 0; - if (skb->len) { - unsigned long flags; - - spin_lock_irqsave(&self->lock, flags); - self->tx_buff.len = async_wrap_skb(skb, - self->tx_buff.head, - self->tx_buff.truesize); - spin_unlock_irqrestore(&self->lock, flags); - - if (self->tx_buff.len > self->tx_buff.truesize) - self->tx_buff.len = self->tx_buff.truesize; - - sh_irda_write(self, IRTFLR, self->tx_buff.len); - sh_irda_write(self, IRTCTR, ARMOD | TE); - } else - goto sh_irda_hard_xmit_end; - - dev_kfree_skb(skb); - - return 0; - -sh_irda_hard_xmit_end: - sh_irda_set_baudrate(self, 9600); - netif_wake_queue(self->ndev); - sh_irda_rcv_ctrl(self, 1); - dev_kfree_skb(skb); - - return ret; - -} - -static int sh_irda_ioctl(struct net_device *ndev, struct ifreq *ifreq, int cmd) -{ - /* - * FIXME - * - * This function is needed for irda framework. 
- * But nothing to do now - */ - return 0; -} - -static struct net_device_stats *sh_irda_stats(struct net_device *ndev) -{ - struct sh_irda_self *self = netdev_priv(ndev); - - return &self->ndev->stats; -} - -static int sh_irda_open(struct net_device *ndev) -{ - struct sh_irda_self *self = netdev_priv(ndev); - int err; - - pm_runtime_get_sync(&self->pdev->dev); - err = sh_irda_crc_init(self); - if (err) - goto open_err; - - sh_irda_set_mode(self, SH_IRDA_SIR); - sh_irda_set_timeout(self, 2); - sh_irda_set_baudrate(self, 9600); - - self->irlap = irlap_open(ndev, &self->qos, DRIVER_NAME); - if (!self->irlap) { - err = -ENODEV; - goto open_err; - } - - netif_start_queue(ndev); - sh_irda_rcv_ctrl(self, 1); - sh_irda_set_irq_mask(self); - - dev_info(&ndev->dev, "opened\n"); - - return 0; - -open_err: - pm_runtime_put_sync(&self->pdev->dev); - - return err; -} - -static int sh_irda_stop(struct net_device *ndev) -{ - struct sh_irda_self *self = netdev_priv(ndev); - - /* Stop IrLAP */ - if (self->irlap) { - irlap_close(self->irlap); - self->irlap = NULL; - } - - netif_stop_queue(ndev); - pm_runtime_put_sync(&self->pdev->dev); - - dev_info(&ndev->dev, "stopped\n"); - - return 0; -} - -static const struct net_device_ops sh_irda_ndo = { - .ndo_open = sh_irda_open, - .ndo_stop = sh_irda_stop, - .ndo_start_xmit = sh_irda_hard_xmit, - .ndo_do_ioctl = sh_irda_ioctl, - .ndo_get_stats = sh_irda_stats, -}; - -/************************************************************************ - - - platform_driver function - - -************************************************************************/ -static int sh_irda_probe(struct platform_device *pdev) -{ - struct net_device *ndev; - struct sh_irda_self *self; - struct resource *res; - int irq; - int err = -ENOMEM; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - irq = platform_get_irq(pdev, 0); - if (!res || irq < 0) { - dev_err(&pdev->dev, "Not enough platform resources.\n"); - goto exit; - } - - ndev = alloc_irdadev(sizeof(*self)); - if (!ndev) - goto exit; - - self = netdev_priv(ndev); - self->membase = ioremap_nocache(res->start, resource_size(res)); - if (!self->membase) { - err = -ENXIO; - dev_err(&pdev->dev, "Unable to ioremap.\n"); - goto err_mem_1; - } - - err = sh_irda_init_iobuf(self, IRDA_SKB_MAX_MTU, IRDA_SIR_MAX_FRAME); - if (err) - goto err_mem_2; - - self->pdev = pdev; - pm_runtime_enable(&pdev->dev); - - irda_init_max_qos_capabilies(&self->qos); - - ndev->netdev_ops = &sh_irda_ndo; - ndev->irq = irq; - - self->ndev = ndev; - self->qos.baud_rate.bits &= IR_9600; /* FIXME */ - self->qos.min_turn_time.bits = 1; /* 10 ms or more */ - spin_lock_init(&self->lock); - - irda_qos_bits_to_value(&self->qos); - - err = register_netdev(ndev); - if (err) - goto err_mem_4; - - platform_set_drvdata(pdev, ndev); - err = devm_request_irq(&pdev->dev, irq, sh_irda_irq, 0, "sh_irda", self); - if (err) { - dev_warn(&pdev->dev, "Unable to attach sh_irda interrupt\n"); - goto err_mem_4; - } - - dev_info(&pdev->dev, "SuperH IrDA probed\n"); - - goto exit; - -err_mem_4: - pm_runtime_disable(&pdev->dev); - sh_irda_remove_iobuf(self); -err_mem_2: - iounmap(self->membase); -err_mem_1: - free_netdev(ndev); -exit: - return err; -} - -static int sh_irda_remove(struct platform_device *pdev) -{ - struct net_device *ndev = platform_get_drvdata(pdev); - struct sh_irda_self *self = netdev_priv(ndev); - - if (!self) - return 0; - - unregister_netdev(ndev); - pm_runtime_disable(&pdev->dev); - sh_irda_remove_iobuf(self); - iounmap(self->membase); - free_netdev(ndev); - - 
return 0; -} - -static int sh_irda_runtime_nop(struct device *dev) -{ - /* Runtime PM callback shared between ->runtime_suspend() - * and ->runtime_resume(). Simply returns success. - * - * This driver re-initializes all registers after - * pm_runtime_get_sync() anyway so there is no need - * to save and restore registers here. - */ - return 0; -} - -static const struct dev_pm_ops sh_irda_pm_ops = { - .runtime_suspend = sh_irda_runtime_nop, - .runtime_resume = sh_irda_runtime_nop, -}; - -static struct platform_driver sh_irda_driver = { - .probe = sh_irda_probe, - .remove = sh_irda_remove, - .driver = { - .name = DRIVER_NAME, - .pm = &sh_irda_pm_ops, - }, -}; - -module_platform_driver(sh_irda_driver); - -MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>"); -MODULE_DESCRIPTION("SuperH IrDA driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 510e90a6bb26..9abc36bf77ea 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -861,7 +861,8 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) goto drop; if (skb->sk && sk_fullsock(skb->sk)) { - sock_tx_timestamp(skb->sk, &skb_shinfo(skb)->tx_flags); + sock_tx_timestamp(skb->sk, skb->sk->sk_tsflags, + &skb_shinfo(skb)->tx_flags); sw_tx_timestamp(skb); } diff --git a/include/net/ip.h b/include/net/ip.h index fad74d323bd6..93725e546758 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -56,6 +56,7 @@ static inline unsigned int ip_hdrlen(const struct sk_buff *skb) } struct ipcm_cookie { + struct sockcm_cookie sockc; __be32 addr; int oif; struct ip_options_rcu *opt; @@ -550,7 +551,7 @@ int ip_options_rcv_srr(struct sk_buff *skb); void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb); void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb, int offset); -int ip_cmsg_send(struct net *net, struct msghdr *msg, +int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc, bool allow_ipv6); int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen); diff --git a/include/net/ipv6.h b/include/net/ipv6.h index d0aeb97aec5d..55ee1eb7d026 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -867,7 +867,8 @@ int ip6_append_data(struct sock *sk, int odd, struct sk_buff *skb), void *from, int length, int transhdrlen, int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6, - struct rt6_info *rt, unsigned int flags, int dontfrag); + struct rt6_info *rt, unsigned int flags, int dontfrag, + const struct sockcm_cookie *sockc); int ip6_push_pending_frames(struct sock *sk); @@ -884,7 +885,8 @@ struct sk_buff *ip6_make_skb(struct sock *sk, void *from, int length, int transhdrlen, int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6, struct rt6_info *rt, - unsigned int flags, int dontfrag); + unsigned int flags, int dontfrag, + const struct sockcm_cookie *sockc); static inline struct sk_buff *ip6_finish_skb(struct sock *sk) { diff --git a/include/net/sock.h b/include/net/sock.h index 255d3e03727b..e91b87f54f99 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1418,8 +1418,11 @@ void sk_send_sigurg(struct sock *sk); struct sockcm_cookie { u32 mark; + u16 tsflags; }; +int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg, + struct sockcm_cookie *sockc); int sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct sockcm_cookie *sockc); @@ -2054,19 +2057,21 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct 
sock *sk, sk->sk_stamp = skb->tstamp; } -void __sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags); +void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags); /** * sock_tx_timestamp - checks whether the outgoing packet is to be time stamped * @sk: socket sending this packet + * @tsflags: timestamping flags to use * @tx_flags: completed with instructions for time stamping * * Note : callers should take care of initial *tx_flags value (usually 0) */ -static inline void sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags) +static inline void sock_tx_timestamp(const struct sock *sk, __u16 tsflags, + __u8 *tx_flags) { - if (unlikely(sk->sk_tsflags)) - __sock_tx_timestamp(sk, tx_flags); + if (unlikely(tsflags)) + __sock_tx_timestamp(tsflags, tx_flags); if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS))) *tx_flags |= SKBTX_WIFI_STATUS; } diff --git a/include/net/tcp.h b/include/net/tcp.h index f8bb4a4ed3d1..a23282996ca9 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -754,7 +754,8 @@ struct tcp_skb_cb { TCPCB_REPAIRED) __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */ - /* 1 byte hole */ + __u8 txstamp_ack:1, /* Record TX timestamp for ack? */ + unused:7; __u32 ack_seq; /* Sequence number ACK'd */ union { struct inet_skb_parm h4; diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h index b927413dde86..2b1c3450ab20 100644 --- a/include/net/transp_v6.h +++ b/include/net/transp_v6.h @@ -42,7 +42,8 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg, int ip6_datagram_send_ctl(struct net *net, struct sock *sk, struct msghdr *msg, struct flowi6 *fl6, struct ipv6_txoptions *opt, - int *hlimit, int *tclass, int *dontfrag); + int *hlimit, int *tclass, int *dontfrag, + struct sockcm_cookie *sockc); void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp, __u16 srcp, __u16 destp, int bucket); diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h index 6d1abea9746e..264e515de16f 100644 --- a/include/uapi/linux/net_tstamp.h +++ b/include/uapi/linux/net_tstamp.h @@ -31,6 +31,16 @@ enum { SOF_TIMESTAMPING_LAST }; +/* + * SO_TIMESTAMPING flags are either for recording a packet timestamp or for + * reporting the timestamp to user space. + * Recording flags can be set both via socket options and control messages. 
+ */ +#define SOF_TIMESTAMPING_TX_RECORD_MASK (SOF_TIMESTAMPING_TX_HARDWARE | \ + SOF_TIMESTAMPING_TX_SOFTWARE | \ + SOF_TIMESTAMPING_TX_SCHED | \ + SOF_TIMESTAMPING_TX_ACK) + /** * struct hwtstamp_config - %SIOCGHWTSTAMP and %SIOCSHWTSTAMP parameter * diff --git a/net/can/raw.c b/net/can/raw.c index 2e67b1423cd3..972c187d40ab 100644 --- a/net/can/raw.c +++ b/net/can/raw.c @@ -755,7 +755,7 @@ static int raw_sendmsg(struct socket *sock, struct msghdr *msg, size_t size) if (err < 0) goto free_skb; - sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags); + sock_tx_timestamp(sk, sk->sk_tsflags, &skb_shinfo(skb)->tx_flags); skb->dev = dev; skb->sk = sk; diff --git a/net/core/sock.c b/net/core/sock.c index b67b9aedb230..315f5e57fffe 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -832,7 +832,8 @@ set_rcvbuf: !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)) { if (sk->sk_protocol == IPPROTO_TCP && sk->sk_type == SOCK_STREAM) { - if (sk->sk_state != TCP_ESTABLISHED) { + if ((1 << sk->sk_state) & + (TCPF_CLOSE | TCPF_LISTEN)) { ret = -EINVAL; break; } @@ -1866,27 +1867,51 @@ struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size, } EXPORT_SYMBOL(sock_alloc_send_skb); +int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg, + struct sockcm_cookie *sockc) +{ + u32 tsflags; + + switch (cmsg->cmsg_type) { + case SO_MARK: + if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) + return -EPERM; + if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32))) + return -EINVAL; + sockc->mark = *(u32 *)CMSG_DATA(cmsg); + break; + case SO_TIMESTAMPING: + if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32))) + return -EINVAL; + + tsflags = *(u32 *)CMSG_DATA(cmsg); + if (tsflags & ~SOF_TIMESTAMPING_TX_RECORD_MASK) + return -EINVAL; + + sockc->tsflags &= ~SOF_TIMESTAMPING_TX_RECORD_MASK; + sockc->tsflags |= tsflags; + break; + default: + return -EINVAL; + } + return 0; +} +EXPORT_SYMBOL(__sock_cmsg_send); + int sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct sockcm_cookie *sockc) { struct cmsghdr *cmsg; + int ret; for_each_cmsghdr(cmsg, msg) { if (!CMSG_OK(msg, cmsg)) return -EINVAL; if (cmsg->cmsg_level != SOL_SOCKET) continue; - switch (cmsg->cmsg_type) { - case SO_MARK: - if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) - return -EPERM; - if (cmsg->cmsg_len != CMSG_LEN(sizeof(u32))) - return -EINVAL; - sockc->mark = *(u32 *)CMSG_DATA(cmsg); - break; - default: - return -EINVAL; - } + ret = __sock_cmsg_send(sk, msg, cmsg, sockc); + if (ret) + return ret; } return 0; } diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 035ad645a8d9..1b7c0776c805 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -219,11 +219,12 @@ void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb, } EXPORT_SYMBOL(ip_cmsg_recv_offset); -int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc, +int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc, bool allow_ipv6) { int err, val; struct cmsghdr *cmsg; + struct net *net = sock_net(sk); for_each_cmsghdr(cmsg, msg) { if (!CMSG_OK(msg, cmsg)) @@ -244,6 +245,12 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc, continue; } #endif + if (cmsg->cmsg_level == SOL_SOCKET) { + if (__sock_cmsg_send(sk, msg, cmsg, &ipc->sockc)) + return -EINVAL; + continue; + } + if (cmsg->cmsg_level != SOL_IP) continue; switch (cmsg->cmsg_type) { diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index cf9700b1a106..66ddcb60519a 100644 --- a/net/ipv4/ping.c +++ 
b/net/ipv4/ping.c @@ -737,6 +737,7 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) /* no remote port */ } + ipc.sockc.tsflags = sk->sk_tsflags; ipc.addr = inet->inet_saddr; ipc.opt = NULL; ipc.oif = sk->sk_bound_dev_if; @@ -744,10 +745,8 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) ipc.ttl = 0; ipc.tos = -1; - sock_tx_timestamp(sk, &ipc.tx_flags); - if (msg->msg_controllen) { - err = ip_cmsg_send(sock_net(sk), msg, &ipc, false); + err = ip_cmsg_send(sk, msg, &ipc, false); if (unlikely(err)) { kfree(ipc.opt); return err; @@ -768,6 +767,8 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) rcu_read_unlock(); } + sock_tx_timestamp(sk, ipc.sockc.tsflags, &ipc.tx_flags); + saddr = ipc.addr; ipc.addr = faddr = daddr; diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 8d22de74080c..438f50c1a676 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -339,8 +339,8 @@ int raw_rcv(struct sock *sk, struct sk_buff *skb) static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4, struct msghdr *msg, size_t length, - struct rtable **rtp, - unsigned int flags) + struct rtable **rtp, unsigned int flags, + const struct sockcm_cookie *sockc) { struct inet_sock *inet = inet_sk(sk); struct net *net = sock_net(sk); @@ -379,7 +379,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4, skb->ip_summed = CHECKSUM_NONE; - sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags); + sock_tx_timestamp(sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags); skb->transport_header = skb->network_header; err = -EFAULT; @@ -540,6 +540,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) daddr = inet->inet_daddr; } + ipc.sockc.tsflags = sk->sk_tsflags; ipc.addr = inet->inet_saddr; ipc.opt = NULL; ipc.tx_flags = 0; @@ -548,7 +549,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) ipc.oif = sk->sk_bound_dev_if; if (msg->msg_controllen) { - err = ip_cmsg_send(net, msg, &ipc, false); + err = ip_cmsg_send(sk, msg, &ipc, false); if (unlikely(err)) { kfree(ipc.opt); goto out; @@ -638,10 +639,10 @@ back_from_confirm: if (inet->hdrincl) err = raw_send_hdrinc(sk, &fl4, msg, len, - &rt, msg->msg_flags); + &rt, msg->msg_flags, &ipc.sockc); else { - sock_tx_timestamp(sk, &ipc.tx_flags); + sock_tx_timestamp(sk, ipc.sockc.tsflags, &ipc.tx_flags); if (!ipc.addr) ipc.addr = fl4.daddr; diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 08b8b960a8ed..4d73858991af 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -428,14 +428,16 @@ void tcp_init_sock(struct sock *sk) } EXPORT_SYMBOL(tcp_init_sock); -static void tcp_tx_timestamp(struct sock *sk, struct sk_buff *skb) +static void tcp_tx_timestamp(struct sock *sk, u16 tsflags, struct sk_buff *skb) { - if (sk->sk_tsflags) { + if (sk->sk_tsflags || tsflags) { struct skb_shared_info *shinfo = skb_shinfo(skb); + struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); - sock_tx_timestamp(sk, &shinfo->tx_flags); + sock_tx_timestamp(sk, tsflags, &shinfo->tx_flags); if (shinfo->tx_flags & SKBTX_ANY_TSTAMP) shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1; + tcb->txstamp_ack = !!(shinfo->tx_flags & SKBTX_ACK_TSTAMP); } } @@ -957,7 +959,7 @@ new_segment: offset += copy; size -= copy; if (!size) { - tcp_tx_timestamp(sk, skb); + tcp_tx_timestamp(sk, sk->sk_tsflags, skb); goto out; } @@ -1077,6 +1079,7 @@ int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; + struct sockcm_cookie sockc; int flags, err, copied 
= 0; int mss_now = 0, size_goal, copied_syn = 0; bool sg; @@ -1119,6 +1122,15 @@ int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) /* 'common' sending to sendq */ } + sockc.tsflags = sk->sk_tsflags; + if (msg->msg_controllen) { + err = sock_cmsg_send(sk, msg, &sockc); + if (unlikely(err)) { + err = -EINVAL; + goto out_err; + } + } + /* This should be in poll */ sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); @@ -1237,7 +1249,7 @@ new_segment: copied += copy; if (!msg_data_left(msg)) { - tcp_tx_timestamp(sk, skb); + tcp_tx_timestamp(sk, sockc.tsflags, skb); goto out; } diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index f87b84a75691..a26e2d262358 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -3082,7 +3082,7 @@ static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb, const struct skb_shared_info *shinfo; /* Avoid cache line misses to get skb_shinfo() and shinfo->tx_flags */ - if (likely(!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_ACK))) + if (likely(!TCP_SKB_CB(skb)->txstamp_ack)) return; shinfo = skb_shinfo(skb); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 08eed5e16df0..45ff590661f4 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1027,15 +1027,13 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) */ connected = 1; } - ipc.addr = inet->inet_saddr; + ipc.sockc.tsflags = sk->sk_tsflags; + ipc.addr = inet->inet_saddr; ipc.oif = sk->sk_bound_dev_if; - sock_tx_timestamp(sk, &ipc.tx_flags); - if (msg->msg_controllen) { - err = ip_cmsg_send(sock_net(sk), msg, &ipc, - sk->sk_family == AF_INET6); + err = ip_cmsg_send(sk, msg, &ipc, sk->sk_family == AF_INET6); if (unlikely(err)) { kfree(ipc.opt); return err; @@ -1060,6 +1058,8 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) saddr = ipc.addr; ipc.addr = faddr = daddr; + sock_tx_timestamp(sk, ipc.sockc.tsflags, &ipc.tx_flags); + if (ipc.opt && ipc.opt->opt.srr) { if (!daddr) return -EINVAL; diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 428162155280..a73d70119fcd 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -685,7 +685,8 @@ EXPORT_SYMBOL_GPL(ip6_datagram_recv_ctl); int ip6_datagram_send_ctl(struct net *net, struct sock *sk, struct msghdr *msg, struct flowi6 *fl6, struct ipv6_txoptions *opt, - int *hlimit, int *tclass, int *dontfrag) + int *hlimit, int *tclass, int *dontfrag, + struct sockcm_cookie *sockc) { struct in6_pktinfo *src_info; struct cmsghdr *cmsg; @@ -702,6 +703,12 @@ int ip6_datagram_send_ctl(struct net *net, struct sock *sk, goto exit_f; } + if (cmsg->cmsg_level == SOL_SOCKET) { + if (__sock_cmsg_send(sk, msg, cmsg, sockc)) + return -EINVAL; + continue; + } + if (cmsg->cmsg_level != SOL_IPV6) continue; diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 0a37ddc7af51..6b573ebe49de 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -400,6 +400,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) struct icmp6hdr tmp_hdr; struct flowi6 fl6; struct icmpv6_msg msg; + struct sockcm_cookie sockc_unused = {0}; int iif = 0; int addr_type = 0; int len; @@ -527,7 +528,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info) len + sizeof(struct icmp6hdr), sizeof(struct icmp6hdr), hlimit, np->tclass, NULL, &fl6, (struct rt6_info *)dst, - MSG_DONTWAIT, np->dontfrag); + MSG_DONTWAIT, np->dontfrag, &sockc_unused); if (err) { ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTERRORS); ip6_flush_pending_frames(sk); @@ -566,6 +567,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb) int hlimit; u8 
tclass; u32 mark = IP6_REPLY_MARK(net, skb->mark); + struct sockcm_cookie sockc_unused = {0}; saddr = &ipv6_hdr(skb)->daddr; @@ -617,7 +619,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb) err = ip6_append_data(sk, icmpv6_getfrag, &msg, skb->len + sizeof(struct icmp6hdr), sizeof(struct icmp6hdr), hlimit, tclass, NULL, &fl6, (struct rt6_info *)dst, MSG_DONTWAIT, - np->dontfrag); + np->dontfrag, &sockc_unused); if (err) { ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS); diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c index dc2db4f7b182..35d3ddc328f8 100644 --- a/net/ipv6/ip6_flowlabel.c +++ b/net/ipv6/ip6_flowlabel.c @@ -372,6 +372,7 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq, if (olen > 0) { struct msghdr msg; struct flowi6 flowi6; + struct sockcm_cookie sockc_junk; int junk; err = -ENOMEM; @@ -390,7 +391,7 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq, memset(&flowi6, 0, sizeof(flowi6)); err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt, - &junk, &junk, &junk); + &junk, &junk, &junk, &sockc_junk); if (err) goto done; err = -EINVAL; diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 9428345d3a07..612f3d138bf0 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1258,7 +1258,8 @@ static int __ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb), void *from, int length, int transhdrlen, - unsigned int flags, int dontfrag) + unsigned int flags, int dontfrag, + const struct sockcm_cookie *sockc) { struct sk_buff *skb, *skb_prev = NULL; unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu; @@ -1329,7 +1330,7 @@ emsgsize: csummode = CHECKSUM_PARTIAL; if (sk->sk_type == SOCK_DGRAM || sk->sk_type == SOCK_RAW) { - sock_tx_timestamp(sk, &tx_flags); + sock_tx_timestamp(sk, sockc->tsflags, &tx_flags); if (tx_flags & SKBTX_ANY_SW_TSTAMP && sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) tskey = sk->sk_tskey++; @@ -1565,7 +1566,8 @@ int ip6_append_data(struct sock *sk, int odd, struct sk_buff *skb), void *from, int length, int transhdrlen, int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6, - struct rt6_info *rt, unsigned int flags, int dontfrag) + struct rt6_info *rt, unsigned int flags, int dontfrag, + const struct sockcm_cookie *sockc) { struct inet_sock *inet = inet_sk(sk); struct ipv6_pinfo *np = inet6_sk(sk); @@ -1593,7 +1595,8 @@ int ip6_append_data(struct sock *sk, return __ip6_append_data(sk, fl6, &sk->sk_write_queue, &inet->cork.base, &np->cork, sk_page_frag(sk), getfrag, - from, length, transhdrlen, flags, dontfrag); + from, length, transhdrlen, flags, dontfrag, + sockc); } EXPORT_SYMBOL_GPL(ip6_append_data); @@ -1752,7 +1755,7 @@ struct sk_buff *ip6_make_skb(struct sock *sk, int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6, struct rt6_info *rt, unsigned int flags, - int dontfrag) + int dontfrag, const struct sockcm_cookie *sockc) { struct inet_cork_full cork; struct inet6_cork v6_cork; @@ -1779,7 +1782,7 @@ struct sk_buff *ip6_make_skb(struct sock *sk, err = __ip6_append_data(sk, fl6, &queue, &cork.base, &v6_cork, ¤t->task_frag, getfrag, from, length + exthdrlen, transhdrlen + exthdrlen, - flags, dontfrag); + flags, dontfrag, sockc); if (err) { __ip6_flush_pending_frames(sk, &queue, &cork, &v6_cork); return ERR_PTR(err); diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 4449ad1f8114..a5557d22f89e 100644 --- a/net/ipv6/ipv6_sockglue.c +++ 
b/net/ipv6/ipv6_sockglue.c @@ -471,6 +471,7 @@ sticky_done: struct ipv6_txoptions *opt = NULL; struct msghdr msg; struct flowi6 fl6; + struct sockcm_cookie sockc_junk; int junk; memset(&fl6, 0, sizeof(fl6)); @@ -503,7 +504,7 @@ sticky_done: msg.msg_control = (void *)(opt+1); retv = ip6_datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk, - &junk, &junk); + &junk, &junk, &sockc_junk); if (retv) goto done; update: diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c index c382db7a2e73..da1cff79e447 100644 --- a/net/ipv6/ping.c +++ b/net/ipv6/ping.c @@ -62,6 +62,7 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) struct dst_entry *dst; struct rt6_info *rt; struct pingfakehdr pfh; + struct sockcm_cookie junk = {0}; pr_debug("ping_v6_sendmsg(sk=%p,sk->num=%u)\n", inet, inet->inet_num); @@ -144,7 +145,7 @@ static int ping_v6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) err = ip6_append_data(sk, ping_getfrag, &pfh, len, 0, hlimit, np->tclass, NULL, &fl6, rt, - MSG_DONTWAIT, np->dontfrag); + MSG_DONTWAIT, np->dontfrag, &junk); if (err) { ICMP6_INC_STATS(sock_net(sk), rt->rt6i_idev, diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index fa59dd7a427e..b07ce21983aa 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -745,6 +745,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) struct dst_entry *dst = NULL; struct raw6_frag_vec rfv; struct flowi6 fl6; + struct sockcm_cookie sockc; int addr_len = msg->msg_namelen; int hlimit = -1; int tclass = -1; @@ -821,13 +822,15 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) if (fl6.flowi6_oif == 0) fl6.flowi6_oif = sk->sk_bound_dev_if; + sockc.tsflags = sk->sk_tsflags; if (msg->msg_controllen) { opt = &opt_space; memset(opt, 0, sizeof(struct ipv6_txoptions)); opt->tot_len = sizeof(struct ipv6_txoptions); err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, - &hlimit, &tclass, &dontfrag); + &hlimit, &tclass, &dontfrag, + &sockc); if (err < 0) { fl6_sock_release(flowlabel); return err; @@ -897,7 +900,7 @@ back_from_confirm: lock_sock(sk); err = ip6_append_data(sk, raw6_getfrag, &rfv, len, 0, hlimit, tclass, opt, &fl6, (struct rt6_info *)dst, - msg->msg_flags, dontfrag); + msg->msg_flags, dontfrag, &sockc); if (err) ip6_flush_pending_frames(sk); diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 8125931106be..b772a7641fbd 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -1128,6 +1128,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) int connected = 0; int is_udplite = IS_UDPLITE(sk); int (*getfrag)(void *, char *, int, int, int, struct sk_buff *); + struct sockcm_cookie sockc; /* destination address check */ if (sin6) { @@ -1247,6 +1248,7 @@ do_udp_sendmsg: fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex; fl6.flowi6_mark = sk->sk_mark; + sockc.tsflags = sk->sk_tsflags; if (msg->msg_controllen) { opt = &opt_space; @@ -1254,7 +1256,8 @@ do_udp_sendmsg: opt->tot_len = sizeof(*opt); err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, - &hlimit, &tclass, &dontfrag); + &hlimit, &tclass, &dontfrag, + &sockc); if (err < 0) { fl6_sock_release(flowlabel); return err; @@ -1321,7 +1324,7 @@ back_from_confirm: skb = ip6_make_skb(sk, getfrag, msg, ulen, sizeof(struct udphdr), hlimit, tclass, opt, &fl6, (struct rt6_info *)dst, - msg->msg_flags, dontfrag); + msg->msg_flags, dontfrag, &sockc); err = PTR_ERR(skb); if (!IS_ERR_OR_NULL(skb)) err = udp_v6_send_skb(skb, &fl6); @@ -1348,7 +1351,8 @@ do_append_data: err = ip6_append_data(sk, getfrag, msg, 
ulen, sizeof(struct udphdr), hlimit, tclass, opt, &fl6, (struct rt6_info *)dst, - corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags, dontfrag); + corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags, dontfrag, + &sockc); if (err) udp_v6_flush_pending_frames(sk); else if (!corkreq) diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 6b54ff3ff4cb..1a38f20b1ca6 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -492,6 +492,7 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) struct ip6_flowlabel *flowlabel = NULL; struct dst_entry *dst = NULL; struct flowi6 fl6; + struct sockcm_cookie sockc_unused = {0}; int addr_len = msg->msg_namelen; int hlimit = -1; int tclass = -1; @@ -562,9 +563,10 @@ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) memset(opt, 0, sizeof(struct ipv6_txoptions)); opt->tot_len = sizeof(struct ipv6_txoptions); - err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, - &hlimit, &tclass, &dontfrag); - if (err < 0) { + err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, + &hlimit, &tclass, &dontfrag, + &sockc_unused); + if (err < 0) { fl6_sock_release(flowlabel); return err; } @@ -625,7 +627,7 @@ back_from_confirm: err = ip6_append_data(sk, ip_generic_getfrag, msg, ulen, transhdrlen, hlimit, tclass, opt, &fl6, (struct rt6_info *)dst, - msg->msg_flags, dontfrag); + msg->msg_flags, dontfrag, &sockc_unused); if (err) ip6_flush_pending_frames(sk); else if (!(msg->msg_flags & MSG_MORE)) diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 1ecfa710ca98..0007e23202e4 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -1837,6 +1837,7 @@ static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg, DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name); struct sk_buff *skb = NULL; struct net_device *dev; + struct sockcm_cookie sockc; __be16 proto = 0; int err; int extra_len = 0; @@ -1925,12 +1926,21 @@ retry: goto out_unlock; } + sockc.tsflags = 0; + if (msg->msg_controllen) { + err = sock_cmsg_send(sk, msg, &sockc); + if (unlikely(err)) { + err = -EINVAL; + goto out_unlock; + } + } + skb->protocol = proto; skb->dev = dev; skb->priority = sk->sk_priority; skb->mark = sk->sk_mark; - sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags); + sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags); if (unlikely(extra_len == 4)) skb->no_fcs = 1; @@ -2486,7 +2496,8 @@ static int packet_snd_vnet_gso(struct sk_buff *skb, static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, void *frame, struct net_device *dev, void *data, int tp_len, - __be16 proto, unsigned char *addr, int hlen, int copylen) + __be16 proto, unsigned char *addr, int hlen, int copylen, + const struct sockcm_cookie *sockc) { union tpacket_uhdr ph; int to_write, offset, len, nr_frags, len_max; @@ -2500,7 +2511,7 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, skb->dev = dev; skb->priority = po->sk.sk_priority; skb->mark = po->sk.sk_mark; - sock_tx_timestamp(&po->sk, &skb_shinfo(skb)->tx_flags); + sock_tx_timestamp(&po->sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags); skb_shinfo(skb)->destructor_arg = ph.raw; skb_reserve(skb, hlen); @@ -2624,6 +2635,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) struct sk_buff *skb; struct net_device *dev; struct virtio_net_hdr *vnet_hdr = NULL; + struct sockcm_cookie sockc; __be16 proto; int err, reserve = 0; void *ph; @@ -2655,6 +2667,13 @@ static int tpacket_snd(struct packet_sock 
*po, struct msghdr *msg) dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex); } + sockc.tsflags = 0; + if (msg->msg_controllen) { + err = sock_cmsg_send(&po->sk, msg, &sockc); + if (unlikely(err)) + goto out; + } + err = -ENXIO; if (unlikely(dev == NULL)) goto out; @@ -2712,7 +2731,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg) goto out_status; } tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto, - addr, hlen, copylen); + addr, hlen, copylen, &sockc); if (likely(tp_len >= 0) && tp_len > dev->mtu + reserve && !po->has_vnet_hdr && @@ -2851,6 +2870,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) if (unlikely(!(dev->flags & IFF_UP))) goto out_unlock; + sockc.tsflags = 0; sockc.mark = sk->sk_mark; if (msg->msg_controllen) { err = sock_cmsg_send(sk, msg, &sockc); @@ -2908,7 +2928,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) goto out_free; } - sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags); + sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags); if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) && !packet_extra_vlan_len_allowed(dev, skb)) { diff --git a/net/socket.c b/net/socket.c index 5f77a8e93830..979d3146b081 100644 --- a/net/socket.c +++ b/net/socket.c @@ -587,20 +587,20 @@ void sock_release(struct socket *sock) } EXPORT_SYMBOL(sock_release); -void __sock_tx_timestamp(const struct sock *sk, __u8 *tx_flags) +void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags) { u8 flags = *tx_flags; - if (sk->sk_tsflags & SOF_TIMESTAMPING_TX_HARDWARE) + if (tsflags & SOF_TIMESTAMPING_TX_HARDWARE) flags |= SKBTX_HW_TSTAMP; - if (sk->sk_tsflags & SOF_TIMESTAMPING_TX_SOFTWARE) + if (tsflags & SOF_TIMESTAMPING_TX_SOFTWARE) flags |= SKBTX_SW_TSTAMP; - if (sk->sk_tsflags & SOF_TIMESTAMPING_TX_SCHED) + if (tsflags & SOF_TIMESTAMPING_TX_SCHED) flags |= SKBTX_SCHED_TSTAMP; - if (sk->sk_tsflags & SOF_TIMESTAMPING_TX_ACK) + if (tsflags & SOF_TIMESTAMPING_TX_ACK) flags |= SKBTX_ACK_TSTAMP; *tx_flags = flags; |
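
The hunks above route the per-call SOF_TIMESTAMPING_TX_* flags from a sendmsg() control message (carried in struct sockcm_cookie) into sock_tx_timestamp(), but reading the generated timestamps back is unchanged: they are still delivered on the socket's error queue. Below is a minimal userspace sketch, not part of the patch, showing one way to drain those reports; the helper name drain_tx_timestamps() is illustrative, and it assumes a reporting flag such as SOF_TIMESTAMPING_SOFTWARE has already been enabled on the socket with setsockopt().

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>

/* Illustrative helper: read pending tx timestamp reports from the error
 * queue of an already configured socket. Returns silently when the queue
 * is empty.
 */
static void drain_tx_timestamps(int fd)
{
	char control[256];
	struct msghdr msg;
	struct cmsghdr *cm;

	memset(&msg, 0, sizeof(msg));
	msg.msg_control = control;
	msg.msg_controllen = sizeof(control);

	if (recvmsg(fd, &msg, MSG_ERRQUEUE | MSG_DONTWAIT) < 0)
		return;

	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
		if (cm->cmsg_level == SOL_SOCKET &&
		    cm->cmsg_type == SCM_TIMESTAMPING) {
			struct scm_timestamping *tss =
				(struct scm_timestamping *)CMSG_DATA(cm);

			/* ts[0] carries software timestamps, ts[2] hardware
			 * timestamps, depending on the flags requested.
			 */
			printf("sw tstamp %lld.%09ld\n",
			       (long long)tss->ts[0].tv_sec,
			       tss->ts[0].tv_nsec);
		}
	}
}

When SOF_TIMESTAMPING_OPT_ID is also enabled, the accompanying sock_extended_err control message (SO_EE_ORIGIN_TIMESTAMPING) carries an identifier in ee_data and the timestamp type (SCM_TSTAMP_SCHED, SCM_TSTAMP_SND or SCM_TSTAMP_ACK) in ee_info, which lets an application match each report back to the write that requested it.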