79 files changed, 5775 insertions, 670 deletions
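A note on the datapath added by this series: pieced together from the chcr code below, a crypto request flows roughly as sketched here. This is an illustrative outline reconstructed from the patch, not text from the commit itself.

/*
 * Rough chcr request lifecycle, reconstructed from the code below:
 *
 *   crypto API entry point (chcr_aes_encrypt(), chcr_ahash_update(), ...)
 *     -> create_cipher_wr() / create_final_hash_wr() builds an skb carrying
 *        a FW_CRYPTO_LOOKASIDE_WR: ULP TX packet header, CPL_TX_SEC_PDU,
 *        key context, and (for ciphers) a CPL_RX_PHYS_DSGL describing the
 *        DMA-mapped destination scatterlist
 *     -> set_wr_txq() picks the offload TX queue and chcr_send_wr() hands
 *        the skb to cxgb4; the entry point returns -EINPROGRESS
 *     -> the adapter replies with a CPL_FW6_PLD on the chosen RX queue and
 *        chcr_handle_resp() copies the IV or digest back to the request and
 *        unmaps the destination buffers
 */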
diff --git a/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt b/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt index 30d487597ecb..fb40891ee606 100644 --- a/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt +++ b/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt @@ -6,9 +6,13 @@ Required properties: - reg: addresses and length of the register sets for the device, must be 6 pairs of register addresses and lengths - interrupts: interrupts for the devices, must be two interrupts +- #address-cells: must be 1, see dsa/dsa.txt +- #size-cells: must be 0, see dsa/dsa.txt + +Deprecated binding required properties: + - dsa,mii-bus: phandle to the MDIO bus controller, see dsa/dsa.txt - dsa,ethernet: phandle to the CPU network interface controller, see dsa/dsa.txt -- #size-cells: must be 0 - #address-cells: must be 2, see dsa/dsa.txt Subnodes: @@ -48,6 +52,45 @@ switch_top@f0b00000 { ethernet_switch@0 { compatible = "brcm,bcm7445-switch-v4.0"; #size-cells = <0>; + #address-cells = <1>; + reg = <0x0 0x40000 + 0x40000 0x110 + 0x40340 0x30 + 0x40380 0x30 + 0x40400 0x34 + 0x40600 0x208>; + reg-names = "core", "reg", "intrl2_0", "intrl2_1", + "fcb", "acb"; + interrupts = <0 0x18 0 + 0 0x19 0>; + brcm,num-gphy = <1>; + brcm,num-rgmii-ports = <2>; + brcm,fcb-pause-override; + brcm,acb-packets-inflight; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + label = "gphy"; + reg = <0>; + }; + }; + }; +}; + +Example using the old DSA DeviceTree binding: + +switch_top@f0b00000 { + compatible = "simple-bus"; + #size-cells = <1>; + #address-cells = <1>; + ranges = <0 0xf0b00000 0x40804>; + + ethernet_switch@0 { + compatible = "brcm,bcm7445-switch-v4.0"; + #size-cells = <0>; #address-cells = <2>; reg = <0x0 0x40000 0x40000 0x110 diff --git a/MAINTAINERS b/MAINTAINERS index e902b635cbfc..99f95279e45b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -9936,6 +9936,7 @@ F: net/rfkill/ RHASHTABLE M: Thomas Graf <tgraf@suug.ch> +M: Herbert Xu <herbert@gondor.apana.org.au> L: netdev@vger.kernel.org S: Maintained F: lib/rhashtable.c diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c index 75dde903b238..81aaa505862c 100644 --- a/drivers/atm/fore200e.c +++ b/drivers/atm/fore200e.c @@ -2489,7 +2489,7 @@ static int fore200e_load_and_start_fw(struct fore200e *fore200e) { const struct firmware *firmware; struct device *device; - struct fw_header *fw_header; + const struct fw_header *fw_header; const __le32 *fw_data; u32 fw_size; u32 __iomem *load_addr; @@ -2511,9 +2511,9 @@ static int fore200e_load_and_start_fw(struct fore200e *fore200e) return err; } - fw_data = (__le32 *) firmware->data; + fw_data = (const __le32 *)firmware->data; fw_size = firmware->size / sizeof(u32); - fw_header = (struct fw_header *) firmware->data; + fw_header = (const struct fw_header *)firmware->data; load_addr = fore200e->virt_base + le32_to_cpu(fw_header->load_offset); DPRINTK(2, "device %s firmware being loaded at 0x%p (%d words)\n", diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 1af94e2d1a25..9b035b7d7f4f 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -550,4 +550,6 @@ config CRYPTO_DEV_ROCKCHIP This driver interfaces with the hardware crypto accelerator. Supporting cbc/ecb chainmode, and aes/des/des3_ede cipher mode.
+source "drivers/crypto/chelsio/Kconfig" + endif # CRYPTO_HW diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index 3c6432dd09d9..ad7250fa1348 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile @@ -31,3 +31,4 @@ obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/ obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/ obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sunxi-ss/ obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/ +obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chelsio/ diff --git a/drivers/crypto/chelsio/Kconfig b/drivers/crypto/chelsio/Kconfig new file mode 100644 index 000000000000..4ce67fb9a880 --- /dev/null +++ b/drivers/crypto/chelsio/Kconfig @@ -0,0 +1,19 @@ +config CRYPTO_DEV_CHELSIO + tristate "Chelsio Crypto Co-processor Driver" + depends on CHELSIO_T4 + select CRYPTO_SHA1 + select CRYPTO_SHA256 + select CRYPTO_SHA512 + ---help--- + The Chelsio Crypto Co-processor driver for T6 adapters. + + For general information about Chelsio and our products, visit + our website at <http://www.chelsio.com>. + + For customer support, please visit our customer support page at + <http://www.chelsio.com/support.html>. + + Please send feedback to <linux-bugs@chelsio.com>. + + To compile this driver as a module, choose M here: the module + will be called chcr. diff --git a/drivers/crypto/chelsio/Makefile b/drivers/crypto/chelsio/Makefile new file mode 100644 index 000000000000..bebdf06687ad --- /dev/null +++ b/drivers/crypto/chelsio/Makefile @@ -0,0 +1,4 @@ +ccflags-y := -Idrivers/net/ethernet/chelsio/cxgb4 + +obj-$(CONFIG_CRYPTO_DEV_CHELSIO) += chcr.o +chcr-objs := chcr_core.o chcr_algo.o diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c new file mode 100644 index 000000000000..ad8e353cf897 --- /dev/null +++ b/drivers/crypto/chelsio/chcr_algo.c @@ -0,0 +1,1525 @@ +/* + * This file is part of the Chelsio T6 Crypto driver for Linux. + * + * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * Written and Maintained by: + * Manoj Malviya (manojmalviya@chelsio.com) + * Atul Gupta (atul.gupta@chelsio.com) + * Jitendra Lulla (jlulla@chelsio.com) + * Yeshaswi M R Gowda (yeshaswi@chelsio.com) + * Harsh Jain (harsh@chelsio.com) + */ + +#define pr_fmt(fmt) "chcr:" fmt + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/crypto.h> +#include <linux/cryptohash.h> +#include <linux/skbuff.h> +#include <linux/rtnetlink.h> +#include <linux/highmem.h> +#include <linux/scatterlist.h> + +#include <crypto/aes.h> +#include <crypto/algapi.h> +#include <crypto/hash.h> +#include <crypto/sha.h> +#include <crypto/internal/hash.h> + +#include "t4fw_api.h" +#include "t4_msg.h" +#include "chcr_core.h" +#include "chcr_algo.h" +#include "chcr_crypto.h" + +static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx) +{ + return ctx->crypto_ctx->ablkctx; +} + +static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx) +{ + return ctx->crypto_ctx->hmacctx; +} + +static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx) +{ + return ctx->dev->u_ctx; +} + +static inline int is_ofld_imm(const struct sk_buff *skb) +{ + return (skb->len <= CRYPTO_MAX_IMM_TX_PKT_LEN); +} + +/* + * sgl_len - calculates the size of an SGL of the given capacity + * @n: the number of SGL entries + * Calculates the number of flits needed for a scatter/gather list that + * can hold the given number of entries. + */ +static inline unsigned int sgl_len(unsigned int n) +{ + n--; + return (3 * n) / 2 + (n & 1) + 2; +} + +/* + * chcr_handle_resp - Unmap the DMA buffers associated with the request + * @req: crypto request + */ +int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input, + int error_status) +{ + struct crypto_tfm *tfm = req->tfm; + struct chcr_context *ctx = crypto_tfm_ctx(tfm); + struct uld_ctx *u_ctx = ULD_CTX(ctx); + struct chcr_req_ctx ctx_req; + struct cpl_fw6_pld *fw6_pld; + unsigned int digestsize, updated_digestsize; + + switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) { + case CRYPTO_ALG_TYPE_BLKCIPHER: + ctx_req.req.ablk_req = (struct ablkcipher_request *)req; + ctx_req.ctx.ablk_ctx = + ablkcipher_request_ctx(ctx_req.req.ablk_req); + if (!error_status) { + fw6_pld = (struct cpl_fw6_pld *)input; + memcpy(ctx_req.req.ablk_req->info, &fw6_pld->data[2], + AES_BLOCK_SIZE); + } + dma_unmap_sg(&u_ctx->lldi.pdev->dev, ctx_req.req.ablk_req->dst, + ABLK_CTX(ctx)->dst_nents, DMA_FROM_DEVICE); + if (ctx_req.ctx.ablk_ctx->skb) { + kfree_skb(ctx_req.ctx.ablk_ctx->skb); + ctx_req.ctx.ablk_ctx->skb = NULL; + } + break; + + case CRYPTO_ALG_TYPE_AHASH: + ctx_req.req.ahash_req = (struct ahash_request *)req; + ctx_req.ctx.ahash_ctx = + ahash_request_ctx(ctx_req.req.ahash_req); + digestsize = + crypto_ahash_digestsize(crypto_ahash_reqtfm( + ctx_req.req.ahash_req)); + updated_digestsize = digestsize; + if (digestsize == SHA224_DIGEST_SIZE) + updated_digestsize = SHA256_DIGEST_SIZE; + else if (digestsize == SHA384_DIGEST_SIZE) + updated_digestsize = SHA512_DIGEST_SIZE; + if (ctx_req.ctx.ahash_ctx->skb) + ctx_req.ctx.ahash_ctx->skb = NULL; + if (ctx_req.ctx.ahash_ctx->result == 1) { + ctx_req.ctx.ahash_ctx->result = 0; + memcpy(ctx_req.req.ahash_req->result, input + + sizeof(struct cpl_fw6_pld), + digestsize); + } else { + memcpy(ctx_req.ctx.ahash_ctx->partial_hash, input + + sizeof(struct cpl_fw6_pld), + updated_digestsize); + } + kfree(ctx_req.ctx.ahash_ctx->dummy_payload_ptr); + ctx_req.ctx.ahash_ctx->dummy_payload_ptr = NULL; + break; + } + return 0; +} + +/* + * 
calc_tx_flits_ofld - calculate # of flits for an offload packet + * @skb: the packet + * Returns the number of flits needed for the given offload packet. + * These packets are already fully constructed and no additional headers + * will be added. + */ +static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb) +{ + unsigned int flits, cnt; + + if (is_ofld_imm(skb)) + return DIV_ROUND_UP(skb->len, 8); + + flits = skb_transport_offset(skb) / 8; /* headers */ + cnt = skb_shinfo(skb)->nr_frags; + if (skb_tail_pointer(skb) != skb_transport_header(skb)) + cnt++; + return flits + sgl_len(cnt); +} + +static struct shash_desc *chcr_alloc_shash(unsigned int ds) +{ + struct crypto_shash *base_hash = NULL; + struct shash_desc *desc; + + switch (ds) { + case SHA1_DIGEST_SIZE: + base_hash = crypto_alloc_shash("sha1-generic", 0, 0); + break; + case SHA224_DIGEST_SIZE: + base_hash = crypto_alloc_shash("sha224-generic", 0, 0); + break; + case SHA256_DIGEST_SIZE: + base_hash = crypto_alloc_shash("sha256-generic", 0, 0); + break; + case SHA384_DIGEST_SIZE: + base_hash = crypto_alloc_shash("sha384-generic", 0, 0); + break; + case SHA512_DIGEST_SIZE: + base_hash = crypto_alloc_shash("sha512-generic", 0, 0); + break; + } + if (IS_ERR(base_hash)) { + pr_err("Can not allocate sha-generic algo.\n"); + return (void *)base_hash; + } + + desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(base_hash), + GFP_KERNEL); + if (!desc) + return ERR_PTR(-ENOMEM); + desc->tfm = base_hash; + desc->flags = crypto_shash_get_flags(base_hash); + return desc; +} + +static int chcr_compute_partial_hash(struct shash_desc *desc, + char *iopad, char *result_hash, + int digest_size) +{ + struct sha1_state sha1_st; + struct sha256_state sha256_st; + struct sha512_state sha512_st; + int error; + + if (digest_size == SHA1_DIGEST_SIZE) { + error = crypto_shash_init(desc) ?: + crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?: + crypto_shash_export(desc, (void *)&sha1_st); + memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE); + } else if (digest_size == SHA224_DIGEST_SIZE) { + error = crypto_shash_init(desc) ?: + crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?: + crypto_shash_export(desc, (void *)&sha256_st); + memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE); + + } else if (digest_size == SHA256_DIGEST_SIZE) { + error = crypto_shash_init(desc) ?: + crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?: + crypto_shash_export(desc, (void *)&sha256_st); + memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE); + + } else if (digest_size == SHA384_DIGEST_SIZE) { + error = crypto_shash_init(desc) ?: + crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?: + crypto_shash_export(desc, (void *)&sha512_st); + memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE); + + } else if (digest_size == SHA512_DIGEST_SIZE) { + error = crypto_shash_init(desc) ?: + crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?: + crypto_shash_export(desc, (void *)&sha512_st); + memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE); + } else { + error = -EINVAL; + pr_err("Unknown digest size %d\n", digest_size); + } + return error; +} + +static void chcr_change_order(char *buf, int ds) +{ + int i; + + if (ds == SHA512_DIGEST_SIZE) { + for (i = 0; i < (ds / sizeof(u64)); i++) + *((__be64 *)buf + i) = + cpu_to_be64(*((u64 *)buf + i)); + } else { + for (i = 0; i < (ds / sizeof(u32)); i++) + *((__be32 *)buf + i) = + cpu_to_be32(*((u32 *)buf + i)); + } +} + +static inline int is_hmac(struct crypto_tfm *tfm) +{ + struct crypto_alg *alg = 
tfm->__crt_alg; + struct chcr_alg_template *chcr_crypto_alg = + container_of(__crypto_ahash_alg(alg), struct chcr_alg_template, + alg.hash); + if ((chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK) == + CRYPTO_ALG_SUB_TYPE_HASH_HMAC) + return 1; + return 0; +} + +static inline unsigned int ch_nents(struct scatterlist *sg, + unsigned int *total_size) +{ + unsigned int nents; + + for (nents = 0, *total_size = 0; sg; sg = sg_next(sg)) { + nents++; + *total_size += sg->length; + } + return nents; +} + +static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl, + struct scatterlist *sg, + struct phys_sge_parm *sg_param) +{ + struct phys_sge_pairs *to; + unsigned int out_buf_size = sg_param->obsize; + unsigned int nents = sg_param->nents, i, j, tot_len = 0; + + phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL) + | CPL_RX_PHYS_DSGL_ISRDMA_V(0)); + phys_cpl->pcirlxorder_to_noofsgentr = + htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) | + CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) | + CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) | + CPL_RX_PHYS_DSGL_PCITPHNT_V(0) | + CPL_RX_PHYS_DSGL_DCAID_V(0) | + CPL_RX_PHYS_DSGL_NOOFSGENTR_V(nents)); + phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR; + phys_cpl->rss_hdr_int.qid = htons(sg_param->qid); + phys_cpl->rss_hdr_int.hash_val = 0; + to = (struct phys_sge_pairs *)((unsigned char *)phys_cpl + + sizeof(struct cpl_rx_phys_dsgl)); + + for (i = 0; nents; to++) { + for (j = i; (nents && (j < (8 + i))); j++, nents--) { + to->len[j] = htons(sg->length); + to->addr[j] = cpu_to_be64(sg_dma_address(sg)); + if (out_buf_size) { + if (tot_len + sg_dma_len(sg) >= out_buf_size) { + to->len[j] = htons(out_buf_size - + tot_len); + return; + } + tot_len += sg_dma_len(sg); + } + sg = sg_next(sg); + } + } +} + +static inline unsigned +int map_writesg_phys_cpl(struct device *dev, struct cpl_rx_phys_dsgl *phys_cpl, + struct scatterlist *sg, struct phys_sge_parm *sg_param) +{ + if (!sg || !sg_param->nents) + return 0; + + sg_param->nents = dma_map_sg(dev, sg, sg_param->nents, DMA_FROM_DEVICE); + if (sg_param->nents == 0) { + pr_err("CHCR : DMA mapping failed\n"); + return -EINVAL; + } + write_phys_cpl(phys_cpl, sg, sg_param); + return 0; +} + +static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm) +{ + struct crypto_alg *alg = tfm->__crt_alg; + struct chcr_alg_template *chcr_crypto_alg = + container_of(alg, struct chcr_alg_template, alg.crypto); + + return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK; +} + +static inline void +write_sg_data_page_desc(struct sk_buff *skb, unsigned int *frags, + struct scatterlist *sg, unsigned int count) +{ + struct page *spage; + unsigned int page_len; + + skb->len += count; + skb->data_len += count; + skb->truesize += count; + while (count > 0) { + if (sg && (!(sg->length))) + break; + spage = sg_page(sg); + get_page(spage); + page_len = min(sg->length, count); + skb_fill_page_desc(skb, *frags, spage, sg->offset, page_len); + (*frags)++; + count -= page_len; + sg = sg_next(sg); + } +} + +static int generate_copy_rrkey(struct ablk_ctx *ablkctx, + struct _key_ctx *key_ctx) +{ + if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) { + get_aes_decrypt_key(key_ctx->key, ablkctx->key, + ablkctx->enckey_len << 3); + memset(key_ctx->key + ablkctx->enckey_len, 0, + CHCR_AES_MAX_KEY_LEN - ablkctx->enckey_len); + } else { + memcpy(key_ctx->key, + ablkctx->key + (ablkctx->enckey_len >> 1), + ablkctx->enckey_len >> 1); + get_aes_decrypt_key(key_ctx->key + (ablkctx->enckey_len >> 1), + ablkctx->key, ablkctx->enckey_len << 2); + } + return 0; +} 
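The flit accounting in sgl_len() and calc_tx_flits_ofld() above is easy to sanity-check by hand: a flit is 8 bytes, and the formula matches a layout with a 2-flit head (command word, first length, first address) plus 3 flits per additional pair of SGL entries and 2 flits for a lone trailing entry. That layout reading is an assumption drawn from the formula and the usual cxgb4 ULPTX SGL structure, not something the patch states. A standalone userspace sketch, not part of the patch:

#include <assert.h>
#include <stdio.h>

/* Same formula as sgl_len() in chcr_algo.c above. */
static unsigned int sgl_len(unsigned int n)
{
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}

/* Assumed layout model: 2 flits for the head entry, 3 flits per full
 * extra pair of entries, 2 flits for a lone trailing entry. */
static unsigned int sgl_len_model(unsigned int n)
{
	unsigned int extra = n - 1;

	return 2 + 3 * (extra / 2) + 2 * (extra & 1);
}

int main(void)
{
	for (unsigned int n = 1; n <= 16; n++) {
		printf("%2u entries -> %2u flits\n", n, sgl_len(n));
		assert(sgl_len(n) == sgl_len_model(n));
	}
	return 0;
}

For example, 1 entry costs 2 flits, 2 entries cost 4, and 3 entries cost 5, since the third entry completes a 3-flit pair.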
+ +static inline void create_wreq(struct chcr_context *ctx, + struct fw_crypto_lookaside_wr *wreq, + void *req, struct sk_buff *skb, + int kctx_len, int hash_sz, + unsigned int phys_dsgl) +{ + struct uld_ctx *u_ctx = ULD_CTX(ctx); + struct ulp_txpkt *ulptx = (struct ulp_txpkt *)(wreq + 1); + struct ulptx_idata *sc_imm = (struct ulptx_idata *)(ulptx + 1); + int iv_loc = IV_DSGL; + int qid = u_ctx->lldi.rxq_ids[ctx->tx_channel_id]; + unsigned int immdatalen = 0, nr_frags = 0; + + if (is_ofld_imm(skb)) { + immdatalen = skb->data_len; + iv_loc = IV_IMMEDIATE; + } else { + nr_frags = skb_shinfo(skb)->nr_frags; + } + + wreq->op_to_cctx_size = FILL_WR_OP_CCTX_SIZE(immdatalen, + (kctx_len >> 4)); + wreq->pld_size_hash_size = + htonl(FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(sgl_lengths[nr_frags]) | + FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz)); + wreq->len16_pkd = htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP( + (calc_tx_flits_ofld(skb) * 8), 16))); + wreq->cookie = cpu_to_be64((uintptr_t)req); + wreq->rx_chid_to_rx_q_id = + FILL_WR_RX_Q_ID(ctx->dev->tx_channel_id, qid, + (hash_sz) ? IV_NOP : iv_loc); + + ulptx->cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id); + ulptx->len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8), + 16) - ((sizeof(*wreq)) >> 4))); + + sc_imm->cmd_more = FILL_CMD_MORE(immdatalen); + sc_imm->len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) + kctx_len + + ((hash_sz) ? DUMMY_BYTES : + (sizeof(struct cpl_rx_phys_dsgl) + + phys_dsgl)) + immdatalen); +} + +/** + * create_cipher_wr - form the WR for cipher operations + * @req: cipher req. + * @ctx: crypto driver context of the request. + * @qid: ingress qid where response of this WR should be received. + * @op_type: encryption or decryption + */ +static struct sk_buff +*create_cipher_wr(struct crypto_async_request *req_base, + struct chcr_context *ctx, unsigned short qid, + unsigned short op_type) +{ + struct ablkcipher_request *req = (struct ablkcipher_request *)req_base; + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct uld_ctx *u_ctx = ULD_CTX(ctx); + struct ablk_ctx *ablkctx = ABLK_CTX(ctx); + struct sk_buff *skb = NULL; + struct _key_ctx *key_ctx; + struct fw_crypto_lookaside_wr *wreq; + struct cpl_tx_sec_pdu *sec_cpl; + struct cpl_rx_phys_dsgl *phys_cpl; + struct chcr_blkcipher_req_ctx *req_ctx = ablkcipher_request_ctx(req); + struct phys_sge_parm sg_param; + unsigned int frags = 0, transhdr_len, phys_dsgl, dst_bufsize = 0; + unsigned int ivsize = crypto_ablkcipher_ivsize(tfm), kctx_len; + + if (!req->info) + return ERR_PTR(-EINVAL); + ablkctx->dst_nents = ch_nents(req->dst, &dst_bufsize); + ablkctx->enc = op_type; + + if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) || + (req->nbytes <= 0) || (req->nbytes % AES_BLOCK_SIZE)) + return ERR_PTR(-EINVAL); + + phys_dsgl = get_space_for_phys_dsgl(ablkctx->dst_nents); + + kctx_len = sizeof(*key_ctx) + + (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16); + transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl); + skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), + GFP_ATOMIC); + if (!skb) + return ERR_PTR(-ENOMEM); + skb_reserve(skb, sizeof(struct sge_opaque_hdr)); + wreq = (struct fw_crypto_lookaside_wr *)__skb_put(skb, transhdr_len); + + sec_cpl = (struct cpl_tx_sec_pdu *)((u8 *)wreq + SEC_CPL_OFFSET); + sec_cpl->op_ivinsrtofst = + FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 1, 1); + + sec_cpl->pldlen = htonl(ivsize + req->nbytes); + sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, + ivsize + 1, 0); + + 
sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0, 0, + 0, 0); + sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type, 0, + ablkctx->ciph_mode, + 0, 0, ivsize >> 1, 1); + sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0, + 0, 1, phys_dsgl); + + key_ctx = (struct _key_ctx *)((u8 *)sec_cpl + sizeof(*sec_cpl)); + key_ctx->ctx_hdr = ablkctx->key_ctx_hdr; + if (op_type == CHCR_DECRYPT_OP) { + if (generate_copy_rrkey(ablkctx, key_ctx)) + goto map_fail1; + } else { + if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) { + memcpy(key_ctx->key, ablkctx->key, ablkctx->enckey_len); + } else { + memcpy(key_ctx->key, ablkctx->key + + (ablkctx->enckey_len >> 1), + ablkctx->enckey_len >> 1); + memcpy(key_ctx->key + + (ablkctx->enckey_len >> 1), + ablkctx->key, + ablkctx->enckey_len >> 1); + } + } + phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)key_ctx + kctx_len); + + memcpy(ablkctx->iv, req->info, ivsize); + sg_init_table(&ablkctx->iv_sg, 1); + sg_set_buf(&ablkctx->iv_sg, ablkctx->iv, ivsize); + sg_param.nents = ablkctx->dst_nents; + sg_param.obsize = dst_bufsize; + sg_param.qid = qid; + sg_param.align = 1; + if (map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl, req->dst, + &sg_param)) + goto map_fail1; + + skb_set_transport_header(skb, transhdr_len); + write_sg_data_page_desc(skb, &frags, &ablkctx->iv_sg, ivsize); + write_sg_data_page_desc(skb, &frags, req->src, req->nbytes); + create_wreq(ctx, wreq, req, skb, kctx_len, 0, phys_dsgl); + req_ctx->skb = skb; + skb_get(skb); + return skb; +map_fail1: + kfree_skb(skb); + return ERR_PTR(-ENOMEM); +} + +static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *tfm, const u8 *key, + unsigned int keylen) +{ + struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); + struct ablk_ctx *ablkctx = ABLK_CTX(ctx); + struct ablkcipher_alg *alg = crypto_ablkcipher_alg(tfm); + unsigned int ck_size, context_size; + u16 alignment = 0; + + if ((keylen < alg->min_keysize) || (keylen > alg->max_keysize)) + goto badkey_err; + + memcpy(ablkctx->key, key, keylen); + ablkctx->enckey_len = keylen; + if (keylen == AES_KEYSIZE_128) { + ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; + } else if (keylen == AES_KEYSIZE_192) { + alignment = 8; + ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192; + } else if (keylen == AES_KEYSIZE_256) { + ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; + } else { + goto badkey_err; + } + + context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + + keylen + alignment) >> 4; + + ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, + 0, 0, context_size); + ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC; + return 0; +badkey_err: + crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + ablkctx->enckey_len = 0; + return -EINVAL; +} + +int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx) +{ + int ret = 0; + struct sge_ofld_txq *q; + struct adapter *adap = netdev2adap(dev); + + local_bh_disable(); + q = &adap->sge.ofldtxq[idx]; + spin_lock(&q->sendq.lock); + if (q->full) + ret = -1; + spin_unlock(&q->sendq.lock); + local_bh_enable(); + return ret; +} + +static int chcr_aes_encrypt(struct ablkcipher_request *req) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); + struct crypto_async_request *req_base = &req->base; + struct uld_ctx *u_ctx = ULD_CTX(ctx); + struct sk_buff *skb; + + if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], + ctx->tx_channel_id))) { + if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) + return -EBUSY; + } 
+ + skb = create_cipher_wr(req_base, ctx, + u_ctx->lldi.rxq_ids[ctx->tx_channel_id], + CHCR_ENCRYPT_OP); + if (IS_ERR(skb)) { + pr_err("chcr : %s : Failed to form WR. No memory\n", __func__); + return PTR_ERR(skb); + } + skb->dev = u_ctx->lldi.ports[0]; + set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id); + chcr_send_wr(skb); + return -EINPROGRESS; +} + +static int chcr_aes_decrypt(struct ablkcipher_request *req) +{ + struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); + struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); + struct crypto_async_request *req_base = &req->base; + struct uld_ctx *u_ctx = ULD_CTX(ctx); + struct sk_buff *skb; + + if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], + ctx->tx_channel_id))) { + if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) + return -EBUSY; + } + + skb = create_cipher_wr(req_base, ctx, u_ctx->lldi.rxq_ids[0], + CHCR_DECRYPT_OP); + if (IS_ERR(skb)) { + pr_err("chcr : %s : Failed to form WR. No memory\n", __func__); + return PTR_ERR(skb); + } + skb->dev = u_ctx->lldi.ports[0]; + set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id); + chcr_send_wr(skb); + return -EINPROGRESS; +} + +static int chcr_device_init(struct chcr_context *ctx) +{ + struct uld_ctx *u_ctx; + unsigned int id; + int err = 0, rxq_perchan, rxq_idx; + + id = smp_processor_id(); + if (!ctx->dev) { + err = assign_chcr_device(&ctx->dev); + if (err) { + pr_err("chcr device assignment fails\n"); + goto out; + } + u_ctx = ULD_CTX(ctx); + rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan; + ctx->dev->tx_channel_id = 0; + rxq_idx = ctx->dev->tx_channel_id * rxq_perchan; + rxq_idx += id % rxq_perchan; + spin_lock(&ctx->dev->lock_chcr_dev); + ctx->tx_channel_id = rxq_idx; + spin_unlock(&ctx->dev->lock_chcr_dev); + } +out: + return err; +} + +static int chcr_cra_init(struct crypto_tfm *tfm) +{ + tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx); + return chcr_device_init(crypto_tfm_ctx(tfm)); +} + +static int get_alg_config(struct algo_param *params, + unsigned int auth_size) +{ + switch (auth_size) { + case SHA1_DIGEST_SIZE: + params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160; + params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1; + params->result_size = SHA1_DIGEST_SIZE; + break; + case SHA224_DIGEST_SIZE: + params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256; + params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224; + params->result_size = SHA256_DIGEST_SIZE; + break; + case SHA256_DIGEST_SIZE: + params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256; + params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256; + params->result_size = SHA256_DIGEST_SIZE; + break; + case SHA384_DIGEST_SIZE: + params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512; + params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384; + params->result_size = SHA512_DIGEST_SIZE; + break; + case SHA512_DIGEST_SIZE: + params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512; + params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512; + params->result_size = SHA512_DIGEST_SIZE; + break; + default: + pr_err("chcr : ERROR, unsupported digest size\n"); + return -EINVAL; + } + return 0; +} + +static inline int +write_buffer_data_page_desc(struct chcr_ahash_req_ctx *req_ctx, + struct sk_buff *skb, unsigned int *frags, char *bfr, + u8 bfr_len) +{ + void *page_ptr = NULL; + + skb->len += bfr_len; + skb->data_len += bfr_len; + skb->truesize += bfr_len; + page_ptr = kmalloc(CHCR_HASH_MAX_BLOCK_SIZE_128, GFP_ATOMIC | GFP_DMA); + if (!page_ptr) + return -ENOMEM; + get_page(virt_to_page(page_ptr)); + req_ctx->dummy_payload_ptr = page_ptr; + 
memcpy(page_ptr, bfr, bfr_len); + skb_fill_page_desc(skb, *frags, virt_to_page(page_ptr), + offset_in_page(page_ptr), bfr_len); + (*frags)++; + return 0; +} + +/** + * create_final_hash_wr - Create hash work request + * @req - Cipher req base + */ +static struct sk_buff *create_final_hash_wr(struct ahash_request *req, + struct hash_wr_param *param) +{ + struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); + struct hmac_ctx *hmacctx = HMAC_CTX(ctx); + struct sk_buff *skb = NULL; + struct _key_ctx *key_ctx; + struct fw_crypto_lookaside_wr *wreq; + struct cpl_tx_sec_pdu *sec_cpl; + unsigned int frags = 0, transhdr_len, iopad_alignment = 0; + unsigned int digestsize = crypto_ahash_digestsize(tfm); + unsigned int kctx_len = sizeof(*key_ctx); + u8 hash_size_in_response = 0; + + iopad_alignment = KEYCTX_ALIGN_PAD(digestsize); + kctx_len += param->alg_prm.result_size + iopad_alignment; + if (param->opad_needed) + kctx_len += param->alg_prm.result_size + iopad_alignment; + + if (req_ctx->result) + hash_size_in_response = digestsize; + else + hash_size_in_response = param->alg_prm.result_size; + transhdr_len = HASH_TRANSHDR_SIZE(kctx_len); + skb = alloc_skb((transhdr_len + sizeof(struct sge_opaque_hdr)), + GFP_ATOMIC); + if (!skb) + return skb; + + skb_reserve(skb, sizeof(struct sge_opaque_hdr)); + wreq = (struct fw_crypto_lookaside_wr *)__skb_put(skb, transhdr_len); + memset(wreq, 0, transhdr_len); + + sec_cpl = (struct cpl_tx_sec_pdu *)((u8 *)wreq + SEC_CPL_OFFSET); + sec_cpl->op_ivinsrtofst = + FILL_SEC_CPL_OP_IVINSR(ctx->dev->tx_channel_id, 2, 0, 0); + sec_cpl->pldlen = htonl(param->bfr_len + param->sg_len); + + sec_cpl->aadstart_cipherstop_hi = + FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0); + sec_cpl->cipherstop_lo_authinsert = + FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0); + sec_cpl->seqno_numivs = + FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode, + param->opad_needed, 0, 0); + + sec_cpl->ivgen_hdrlen = + FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0); + + key_ctx = (struct _key_ctx *)((u8 *)sec_cpl + sizeof(*sec_cpl)); + memcpy(key_ctx->key, req_ctx->partial_hash, param->alg_prm.result_size); + + if (param->opad_needed) + memcpy(key_ctx->key + ((param->alg_prm.result_size <= 32) ? 
32 : + CHCR_HASH_MAX_DIGEST_SIZE), + hmacctx->opad, param->alg_prm.result_size); + + key_ctx->ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY, + param->alg_prm.mk_size, 0, + param->opad_needed, + (kctx_len >> 4)); + sec_cpl->scmd1 = cpu_to_be64((u64)param->scmd1); + + skb_set_transport_header(skb, transhdr_len); + if (param->bfr_len != 0) + write_buffer_data_page_desc(req_ctx, skb, &frags, req_ctx->bfr, + param->bfr_len); + if (param->sg_len != 0) + write_sg_data_page_desc(skb, &frags, req->src, param->sg_len); + + create_wreq(ctx, wreq, req, skb, kctx_len, hash_size_in_response, + 0); + req_ctx->skb = skb; + skb_get(skb); + return skb; +} + +static int chcr_ahash_update(struct ahash_request *req) +{ + struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); + struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req); + struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm)); + struct uld_ctx *u_ctx = NULL; + struct sk_buff *skb; + u8 remainder = 0, bs; + unsigned int nbytes = req->nbytes; + struct hash_wr_param params; + + bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); + + u_ctx = ULD_CTX(ctx); + if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], + ctx->tx_channel_id))) { + if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) + return -EBUSY; + } + + if (nbytes + req_ctx->bfr_len >= bs) { + remainder = (nbytes + req_ctx->bfr_len) % bs; + nbytes = nbytes + req_ctx->bfr_len - remainder; + } else { + sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->bfr + + req_ctx->bfr_len, nbytes, 0); + req_ctx->bfr_len += nbytes; + return 0; + } + + params.opad_needed = 0; + params.more = 1; + params.last = 0; + params.sg_len = nbytes - req_ctx->bfr_len; + params.bfr_len = req_ctx->bfr_len; + params.scmd1 = 0; + get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm)); + req_ctx->result = 0; + req_ctx->data_len += params.sg_len + params.bfr_len; + skb = create_final_hash_wr(req, &params); + if (!skb) + return -ENOMEM; + + req_ctx->bfr_len = remainder; + if (remainder) + sg_pcopy_to_buffer(req->src, sg_nents(req->src), + req_ctx->bfr, remainder, req->nbytes - + remainder); + skb->dev = u_ctx->lldi.ports[0]; + set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id); + chcr_send_wr(skb); + + return -EINPROGRESS; +} + +static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1) +{ + memset(bfr_ptr, 0, bs); + *bfr_ptr = 0x80; + if (bs == 64) + *(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3); + else + *(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3); +} + +static int chcr_ahash_final(struct ahash_request *req) +{ + struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); + struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req); + struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm)); + struct hash_wr_param params; + struct sk_buff *skb; + struct uld_ctx *u_ctx = NULL; + u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); + + u_ctx = ULD_CTX(ctx); + if (is_hmac(crypto_ahash_tfm(rtfm))) + params.opad_needed = 1; + else + params.opad_needed = 0; + params.sg_len = 0; + get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm)); + req_ctx->result = 1; + params.bfr_len = req_ctx->bfr_len; + req_ctx->data_len += params.bfr_len + params.sg_len; + if (req_ctx->bfr && (req_ctx->bfr_len == 0)) { + create_last_hash_block(req_ctx->bfr, bs, req_ctx->data_len); + params.last = 0; + params.more = 1; + params.scmd1 = 0; + params.bfr_len = bs; + + } else { + params.scmd1 = req_ctx->data_len; + params.last = 1; + params.more = 0; + } + skb = create_final_hash_wr(req, &params); + skb->dev = u_ctx->lldi.ports[0]; + set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id); + chcr_send_wr(skb); + return -EINPROGRESS; +} + +static int chcr_ahash_finup(struct ahash_request *req) +{ + struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); + struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req); + struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm)); + struct uld_ctx *u_ctx = NULL; + struct sk_buff *skb; + struct hash_wr_param params; + u8 bs; + + bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); + u_ctx = ULD_CTX(ctx); + + if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], + ctx->tx_channel_id))) { + if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) + return -EBUSY; + } + + if (is_hmac(crypto_ahash_tfm(rtfm))) + params.opad_needed = 1; + else + params.opad_needed = 0; + + params.sg_len = req->nbytes; + params.bfr_len = req_ctx->bfr_len; + get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm)); + req_ctx->data_len += params.bfr_len + params.sg_len; + req_ctx->result = 1; + if (req_ctx->bfr && (req_ctx->bfr_len + req->nbytes) == 0) { + create_last_hash_block(req_ctx->bfr, bs, req_ctx->data_len); + params.last = 0; + params.more = 1; + params.scmd1 = 0; + params.bfr_len = bs; + } else { + params.scmd1 = req_ctx->data_len; + params.last = 1; + params.more = 0; + } + + skb = create_final_hash_wr(req, &params); + if (!skb) + return -ENOMEM; + skb->dev = u_ctx->lldi.ports[0]; + set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id); + chcr_send_wr(skb); + + return -EINPROGRESS; +} + +static int chcr_ahash_digest(struct ahash_request *req) +{ + struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req); + struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req); + struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm)); + struct uld_ctx *u_ctx = NULL; + struct sk_buff *skb; + struct hash_wr_param params; + u8 bs; + + rtfm->init(req); + bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); + + u_ctx = ULD_CTX(ctx); + if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0], + ctx->tx_channel_id))) { + if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) + return -EBUSY; + } + + if (is_hmac(crypto_ahash_tfm(rtfm))) + params.opad_needed = 1; + else + params.opad_needed = 0; + + params.last = 0; + params.more = 0; + params.sg_len = req->nbytes; + params.bfr_len = 0; + params.scmd1 = 0; + get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm)); + req_ctx->result = 1; + req_ctx->data_len += params.bfr_len + params.sg_len; + + if (req_ctx->bfr && req->nbytes == 0) { + create_last_hash_block(req_ctx->bfr, bs, 0); + params.more = 1; + params.bfr_len = bs; + } + + skb = create_final_hash_wr(req, &params); + if (!skb) + return -ENOMEM; + + skb->dev = u_ctx->lldi.ports[0]; + set_wr_txq(skb, CPL_PRIORITY_DATA, ctx->tx_channel_id); + chcr_send_wr(skb); + return -EINPROGRESS; +} + +static int chcr_ahash_export(struct ahash_request *areq, void *out) +{ + struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); + struct chcr_ahash_req_ctx *state = out; + + state->bfr_len = req_ctx->bfr_len; + state->data_len = req_ctx->data_len; + memcpy(state->bfr, req_ctx->bfr, CHCR_HASH_MAX_BLOCK_SIZE_128); + memcpy(state->partial_hash, req_ctx->partial_hash, + CHCR_HASH_MAX_DIGEST_SIZE); + return 0; +} + +static int chcr_ahash_import(struct ahash_request *areq, const void *in) +{ + struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); + struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in; + + req_ctx->bfr_len
= state->bfr_len; + req_ctx->data_len = state->data_len; + req_ctx->dummy_payload_ptr = NULL; + memcpy(req_ctx->bfr, state->bfr, CHCR_HASH_MAX_BLOCK_SIZE_128); + memcpy(req_ctx->partial_hash, state->partial_hash, + CHCR_HASH_MAX_DIGEST_SIZE); + return 0; +} + +static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int keylen) +{ + struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); + struct hmac_ctx *hmacctx = HMAC_CTX(ctx); + unsigned int digestsize = crypto_ahash_digestsize(tfm); + unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm)); + unsigned int i, err = 0, updated_digestsize; + + /* + * use the key to calculate the ipad and opad. ipad will be sent with + * the first request's data. opad will be sent with the final hash + * result. ipad is stored in hmacctx->ipad and opad in hmacctx->opad. + */ + if (!hmacctx->desc) + return -EINVAL; + if (keylen > bs) { + err = crypto_shash_digest(hmacctx->desc, key, keylen, + hmacctx->ipad); + if (err) + goto out; + keylen = digestsize; + } else { + memcpy(hmacctx->ipad, key, keylen); + } + memset(hmacctx->ipad + keylen, 0, bs - keylen); + memcpy(hmacctx->opad, hmacctx->ipad, bs); + + for (i = 0; i < bs / sizeof(int); i++) { + *((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA; + *((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA; + } + + updated_digestsize = digestsize; + if (digestsize == SHA224_DIGEST_SIZE) + updated_digestsize = SHA256_DIGEST_SIZE; + else if (digestsize == SHA384_DIGEST_SIZE) + updated_digestsize = SHA512_DIGEST_SIZE; + err = chcr_compute_partial_hash(hmacctx->desc, hmacctx->ipad, + hmacctx->ipad, digestsize); + if (err) + goto out; + chcr_change_order(hmacctx->ipad, updated_digestsize); + + err = chcr_compute_partial_hash(hmacctx->desc, hmacctx->opad, + hmacctx->opad, digestsize); + if (err) + goto out; + chcr_change_order(hmacctx->opad, updated_digestsize); +out: + return err; +} + +static int chcr_aes_xts_setkey(struct crypto_ablkcipher *tfm, const u8 *key, + unsigned int key_len) +{ + struct chcr_context *ctx = crypto_ablkcipher_ctx(tfm); + struct ablk_ctx *ablkctx = ABLK_CTX(ctx); + int status = 0; + unsigned short context_size = 0; + + if ((key_len == (AES_KEYSIZE_128 << 1)) || + (key_len == (AES_KEYSIZE_256 << 1))) { + memcpy(ablkctx->key, key, key_len); + ablkctx->enckey_len = key_len; + context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4; + ablkctx->key_ctx_hdr = + FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
+ CHCR_KEYCTX_CIPHER_KEY_SIZE_128 : + CHCR_KEYCTX_CIPHER_KEY_SIZE_256, + CHCR_KEYCTX_NO_KEY, 1, + 0, context_size); + ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS; + } else { + crypto_tfm_set_flags((struct crypto_tfm *)tfm, + CRYPTO_TFM_RES_BAD_KEY_LEN); + ablkctx->enckey_len = 0; + status = -EINVAL; + } + return status; +} + +static int chcr_sha_init(struct ahash_request *areq) +{ + struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); + struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq); + int digestsize = crypto_ahash_digestsize(tfm); + + req_ctx->data_len = 0; + req_ctx->dummy_payload_ptr = NULL; + req_ctx->bfr_len = 0; + req_ctx->skb = NULL; + req_ctx->result = 0; + copy_hash_init_values(req_ctx->partial_hash, digestsize); + return 0; +} + +static int chcr_sha_cra_init(struct crypto_tfm *tfm) +{ + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), + sizeof(struct chcr_ahash_req_ctx)); + return chcr_device_init(crypto_tfm_ctx(tfm)); +} + +static int chcr_hmac_init(struct ahash_request *areq) +{ + struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq); + struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq); + struct chcr_context *ctx = crypto_tfm_ctx(crypto_ahash_tfm(rtfm)); + struct hmac_ctx *hmacctx = HMAC_CTX(ctx); + unsigned int digestsize = crypto_ahash_digestsize(rtfm); + unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm)); + + chcr_sha_init(areq); + req_ctx->data_len = bs; + if (is_hmac(crypto_ahash_tfm(rtfm))) { + if (digestsize == SHA224_DIGEST_SIZE) + memcpy(req_ctx->partial_hash, hmacctx->ipad, + SHA256_DIGEST_SIZE); + else if (digestsize == SHA384_DIGEST_SIZE) + memcpy(req_ctx->partial_hash, hmacctx->ipad, + SHA512_DIGEST_SIZE); + else + memcpy(req_ctx->partial_hash, hmacctx->ipad, + digestsize); + } + return 0; +} + +static int chcr_hmac_cra_init(struct crypto_tfm *tfm) +{ + struct chcr_context *ctx = crypto_tfm_ctx(tfm); + struct hmac_ctx *hmacctx = HMAC_CTX(ctx); + unsigned int digestsize = + crypto_ahash_digestsize(__crypto_ahash_cast(tfm)); + + crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), + sizeof(struct chcr_ahash_req_ctx)); + hmacctx->desc = chcr_alloc_shash(digestsize); + if (IS_ERR(hmacctx->desc)) + return PTR_ERR(hmacctx->desc); + return chcr_device_init(crypto_tfm_ctx(tfm)); +} + +static void chcr_free_shash(struct shash_desc *desc) +{ + crypto_free_shash(desc->tfm); + kfree(desc); +} + +static void chcr_hmac_cra_exit(struct crypto_tfm *tfm) +{ + struct chcr_context *ctx = crypto_tfm_ctx(tfm); + struct hmac_ctx *hmacctx = HMAC_CTX(ctx); + + if (hmacctx->desc) { + chcr_free_shash(hmacctx->desc); + hmacctx->desc = NULL; + } +} + +static struct chcr_alg_template driver_algs[] = { + /* AES-CBC */ + { + .type = CRYPTO_ALG_TYPE_ABLKCIPHER, + .is_registered = 0, + .alg.crypto = { + .cra_name = "cbc(aes)", + .cra_driver_name = "cbc(aes-chcr)", + .cra_priority = CHCR_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct chcr_context) + + sizeof(struct ablk_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = chcr_cra_init, + .cra_exit = NULL, + .cra_u.ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = chcr_aes_cbc_setkey, + .encrypt = chcr_aes_encrypt, + .decrypt = chcr_aes_decrypt, + } + } + }, + { + .type = CRYPTO_ALG_TYPE_ABLKCIPHER, + .is_registered = 0, + .alg.crypto = { + .cra_name = "xts(aes)", + 
.cra_driver_name = "xts(aes-chcr)", + .cra_priority = CHCR_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | + CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct chcr_context) + + sizeof(struct ablk_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = chcr_cra_init, + .cra_exit = NULL, + .cra_u = { + .ablkcipher = { + .min_keysize = 2 * AES_MIN_KEY_SIZE, + .max_keysize = 2 * AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = chcr_aes_xts_setkey, + .encrypt = chcr_aes_encrypt, + .decrypt = chcr_aes_decrypt, + } + } + } + }, + /* SHA */ + { + .type = CRYPTO_ALG_TYPE_AHASH, + .is_registered = 0, + .alg.hash = { + .halg.digestsize = SHA1_DIGEST_SIZE, + .halg.base = { + .cra_name = "sha1", + .cra_driver_name = "sha1-chcr", + .cra_blocksize = SHA1_BLOCK_SIZE, + } + } + }, + { + .type = CRYPTO_ALG_TYPE_AHASH, + .is_registered = 0, + .alg.hash = { + .halg.digestsize = SHA256_DIGEST_SIZE, + .halg.base = { + .cra_name = "sha256", + .cra_driver_name = "sha256-chcr", + .cra_blocksize = SHA256_BLOCK_SIZE, + } + } + }, + { + .type = CRYPTO_ALG_TYPE_AHASH, + .is_registered = 0, + .alg.hash = { + .halg.digestsize = SHA224_DIGEST_SIZE, + .halg.base = { + .cra_name = "sha224", + .cra_driver_name = "sha224-chcr", + .cra_blocksize = SHA224_BLOCK_SIZE, + } + } + }, + { + .type = CRYPTO_ALG_TYPE_AHASH, + .is_registered = 0, + .alg.hash = { + .halg.digestsize = SHA384_DIGEST_SIZE, + .halg.base = { + .cra_name = "sha384", + .cra_driver_name = "sha384-chcr", + .cra_blocksize = SHA384_BLOCK_SIZE, + } + } + }, + { + .type = CRYPTO_ALG_TYPE_AHASH, + .is_registered = 0, + .alg.hash = { + .halg.digestsize = SHA512_DIGEST_SIZE, + .halg.base = { + .cra_name = "sha512", + .cra_driver_name = "sha512-chcr", + .cra_blocksize = SHA512_BLOCK_SIZE, + } + } + }, + /* HMAC */ + { + .type = CRYPTO_ALG_TYPE_HMAC, + .is_registered = 0, + .alg.hash = { + .halg.digestsize = SHA1_DIGEST_SIZE, + .halg.base = { + .cra_name = "hmac(sha1)", + .cra_driver_name = "hmac(sha1-chcr)", + .cra_blocksize = SHA1_BLOCK_SIZE, + } + } + }, + { + .type = CRYPTO_ALG_TYPE_HMAC, + .is_registered = 0, + .alg.hash = { + .halg.digestsize = SHA224_DIGEST_SIZE, + .halg.base = { + .cra_name = "hmac(sha224)", + .cra_driver_name = "hmac(sha224-chcr)", + .cra_blocksize = SHA224_BLOCK_SIZE, + } + } + }, + { + .type = CRYPTO_ALG_TYPE_HMAC, + .is_registered = 0, + .alg.hash = { + .halg.digestsize = SHA256_DIGEST_SIZE, + .halg.base = { + .cra_name = "hmac(sha256)", + .cra_driver_name = "hmac(sha256-chcr)", + .cra_blocksize = SHA256_BLOCK_SIZE, + } + } + }, + { + .type = CRYPTO_ALG_TYPE_HMAC, + .is_registered = 0, + .alg.hash = { + .halg.digestsize = SHA384_DIGEST_SIZE, + .halg.base = { + .cra_name = "hmac(sha384)", + .cra_driver_name = "hmac(sha384-chcr)", + .cra_blocksize = SHA384_BLOCK_SIZE, + } + } + }, + { + .type = CRYPTO_ALG_TYPE_HMAC, + .is_registered = 0, + .alg.hash = { + .halg.digestsize = SHA512_DIGEST_SIZE, + .halg.base = { + .cra_name = "hmac(sha512)", + .cra_driver_name = "hmac(sha512-chcr)", + .cra_blocksize = SHA512_BLOCK_SIZE, + } + } + }, +}; + +/* + * chcr_unregister_alg - Deregister crypto algorithms with + * kernel framework. 
+ */ +static int chcr_unregister_alg(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { + switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) { + case CRYPTO_ALG_TYPE_ABLKCIPHER: + if (driver_algs[i].is_registered) + crypto_unregister_alg( + &driver_algs[i].alg.crypto); + break; + case CRYPTO_ALG_TYPE_AHASH: + if (driver_algs[i].is_registered) + crypto_unregister_ahash( + &driver_algs[i].alg.hash); + break; + } + driver_algs[i].is_registered = 0; + } + return 0; +} + +#define SZ_AHASH_CTX sizeof(struct chcr_context) +#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx)) +#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx) +#define AHASH_CRA_FLAGS (CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC) + +/* + * chcr_register_alg - Register crypto algorithms with kernel framework. + */ +static int chcr_register_alg(void) +{ + struct crypto_alg ai; + struct ahash_alg *a_hash; + int err = 0, i; + char *name = NULL; + + for (i = 0; i < ARRAY_SIZE(driver_algs); i++) { + if (driver_algs[i].is_registered) + continue; + switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) { + case CRYPTO_ALG_TYPE_ABLKCIPHER: + err = crypto_register_alg(&driver_algs[i].alg.crypto); + name = driver_algs[i].alg.crypto.cra_driver_name; + break; + case CRYPTO_ALG_TYPE_AHASH: + a_hash = &driver_algs[i].alg.hash; + a_hash->update = chcr_ahash_update; + a_hash->final = chcr_ahash_final; + a_hash->finup = chcr_ahash_finup; + a_hash->digest = chcr_ahash_digest; + a_hash->export = chcr_ahash_export; + a_hash->import = chcr_ahash_import; + a_hash->halg.statesize = SZ_AHASH_REQ_CTX; + a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY; + a_hash->halg.base.cra_module = THIS_MODULE; + a_hash->halg.base.cra_flags = AHASH_CRA_FLAGS; + a_hash->halg.base.cra_alignmask = 0; + a_hash->halg.base.cra_exit = NULL; + a_hash->halg.base.cra_type = &crypto_ahash_type; + + if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) { + a_hash->halg.base.cra_init = chcr_hmac_cra_init; + a_hash->halg.base.cra_exit = chcr_hmac_cra_exit; + a_hash->init = chcr_hmac_init; + a_hash->setkey = chcr_ahash_setkey; + a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX; + } else { + a_hash->init = chcr_sha_init; + a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX; + a_hash->halg.base.cra_init = chcr_sha_cra_init; + } + err = crypto_register_ahash(&driver_algs[i].alg.hash); + ai = driver_algs[i].alg.hash.halg.base; + name = ai.cra_driver_name; + break; + } + if (err) { + pr_err("chcr : %s : Algorithm registration failed\n", + name); + goto register_err; + } else { + driver_algs[i].is_registered = 1; + } + } + return 0; + +register_err: + chcr_unregister_alg(); + return err; +} + +/* + * start_crypto - Register the crypto algorithms. + * This should be called once when the first device comes up. After this + * kernel will start calling driver APIs for crypto operations. + */ +int start_crypto(void) +{ + return chcr_register_alg(); +} + +/* + * stop_crypto - Deregister all the crypto algorithms with kernel. + * This should be called once when the last device goes down. After this + * kernel will not call the driver API for crypto operations. + */ +int stop_crypto(void) +{ + chcr_unregister_alg(); + return 0; +} diff --git a/drivers/crypto/chelsio/chcr_algo.h b/drivers/crypto/chelsio/chcr_algo.h new file mode 100644 index 000000000000..ec64fbcdeb49 --- /dev/null +++ b/drivers/crypto/chelsio/chcr_algo.h @@ -0,0 +1,471 @@ +/* + * This file is part of the Chelsio T6 Crypto driver for Linux. + * + * Copyright (c) 2003-2016 Chelsio Communications, Inc.
All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + */ + +#ifndef __CHCR_ALGO_H__ +#define __CHCR_ALGO_H__ + +/* Crypto key context */ +#define KEY_CONTEXT_CTX_LEN_S 24 +#define KEY_CONTEXT_CTX_LEN_M 0xff +#define KEY_CONTEXT_CTX_LEN_V(x) ((x) << KEY_CONTEXT_CTX_LEN_S) +#define KEY_CONTEXT_CTX_LEN_G(x) \ + (((x) >> KEY_CONTEXT_CTX_LEN_S) & KEY_CONTEXT_CTX_LEN_M) + +#define KEY_CONTEXT_DUAL_CK_S 12 +#define KEY_CONTEXT_DUAL_CK_M 0x1 +#define KEY_CONTEXT_DUAL_CK_V(x) ((x) << KEY_CONTEXT_DUAL_CK_S) +#define KEY_CONTEXT_DUAL_CK_G(x) \ +(((x) >> KEY_CONTEXT_DUAL_CK_S) & KEY_CONTEXT_DUAL_CK_M) +#define KEY_CONTEXT_DUAL_CK_F KEY_CONTEXT_DUAL_CK_V(1U) + +#define KEY_CONTEXT_SALT_PRESENT_S 10 +#define KEY_CONTEXT_SALT_PRESENT_M 0x1 +#define KEY_CONTEXT_SALT_PRESENT_V(x) ((x) << KEY_CONTEXT_SALT_PRESENT_S) +#define KEY_CONTEXT_SALT_PRESENT_G(x) \ + (((x) >> KEY_CONTEXT_SALT_PRESENT_S) & \ + KEY_CONTEXT_SALT_PRESENT_M) +#define KEY_CONTEXT_SALT_PRESENT_F KEY_CONTEXT_SALT_PRESENT_V(1U) + +#define KEY_CONTEXT_VALID_S 0 +#define KEY_CONTEXT_VALID_M 0x1 +#define KEY_CONTEXT_VALID_V(x) ((x) << KEY_CONTEXT_VALID_S) +#define KEY_CONTEXT_VALID_G(x) \ + (((x) >> KEY_CONTEXT_VALID_S) & \ + KEY_CONTEXT_VALID_M) +#define KEY_CONTEXT_VALID_F KEY_CONTEXT_VALID_V(1U) + +#define KEY_CONTEXT_CK_SIZE_S 6 +#define KEY_CONTEXT_CK_SIZE_M 0xf +#define KEY_CONTEXT_CK_SIZE_V(x) ((x) << KEY_CONTEXT_CK_SIZE_S) +#define KEY_CONTEXT_CK_SIZE_G(x) \ + (((x) >> KEY_CONTEXT_CK_SIZE_S) & KEY_CONTEXT_CK_SIZE_M) + +#define KEY_CONTEXT_MK_SIZE_S 2 +#define KEY_CONTEXT_MK_SIZE_M 0xf +#define KEY_CONTEXT_MK_SIZE_V(x) ((x) << KEY_CONTEXT_MK_SIZE_S) +#define KEY_CONTEXT_MK_SIZE_G(x) \ + (((x) >> KEY_CONTEXT_MK_SIZE_S) & KEY_CONTEXT_MK_SIZE_M) + +#define KEY_CONTEXT_OPAD_PRESENT_S 11 +#define KEY_CONTEXT_OPAD_PRESENT_M 0x1 +#define KEY_CONTEXT_OPAD_PRESENT_V(x) ((x) << KEY_CONTEXT_OPAD_PRESENT_S) +#define KEY_CONTEXT_OPAD_PRESENT_G(x) \ + (((x) >> KEY_CONTEXT_OPAD_PRESENT_S) & \ + KEY_CONTEXT_OPAD_PRESENT_M) +#define KEY_CONTEXT_OPAD_PRESENT_F KEY_CONTEXT_OPAD_PRESENT_V(1U) + +#define CHCR_HASH_MAX_DIGEST_SIZE 64 +#define CHCR_MAX_SHA_DIGEST_SIZE 64 + +#define IPSEC_TRUNCATED_ICV_SIZE 12 +#define TLS_TRUNCATED_HMAC_SIZE 10 +#define CBCMAC_DIGEST_SIZE 16 
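The KEY_CONTEXT_* definitions above follow the cxgb4 register-field convention: _S is the field's bit shift, _M its mask, _V(x) positions a value, _G(x) extracts one, and _F is the single-bit flag form. A minimal standalone illustration of the pattern, reusing the shifts quoted above (the field values are made up for illustration; in the driver the packed word additionally goes through htonl() inside FILL_KEY_CTX_HDR()):

#include <stdint.h>
#include <stdio.h>

/* Same pattern as the KEY_CONTEXT_* macros above. */
#define KEY_CONTEXT_CTX_LEN_S	24
#define KEY_CONTEXT_CTX_LEN_M	0xff
#define KEY_CONTEXT_CTX_LEN_V(x)	((x) << KEY_CONTEXT_CTX_LEN_S)
#define KEY_CONTEXT_CTX_LEN_G(x)	\
	(((x) >> KEY_CONTEXT_CTX_LEN_S) & KEY_CONTEXT_CTX_LEN_M)

#define KEY_CONTEXT_CK_SIZE_S	6
#define KEY_CONTEXT_CK_SIZE_M	0xf
#define KEY_CONTEXT_CK_SIZE_V(x)	((x) << KEY_CONTEXT_CK_SIZE_S)
#define KEY_CONTEXT_CK_SIZE_G(x)	\
	(((x) >> KEY_CONTEXT_CK_SIZE_S) & KEY_CONTEXT_CK_SIZE_M)

#define KEY_CONTEXT_VALID_S	0
#define KEY_CONTEXT_VALID_V(x)	((x) << KEY_CONTEXT_VALID_S)
#define KEY_CONTEXT_VALID_F	KEY_CONTEXT_VALID_V(1U)

int main(void)
{
	/* Pack a header word the way FILL_KEY_CTX_HDR() composes fields. */
	uint32_t hdr = KEY_CONTEXT_CTX_LEN_V(3U) |
		       KEY_CONTEXT_CK_SIZE_V(2U) |
		       KEY_CONTEXT_VALID_F;

	/* Extract the fields back out with the _G() accessors. */
	printf("hdr=0x%08x ctx_len=%u ck_size=%u\n",
	       hdr, KEY_CONTEXT_CTX_LEN_G(hdr), KEY_CONTEXT_CK_SIZE_G(hdr));
	return 0;
}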
+#define MAX_HASH_NAME 20 + +#define SHA1_INIT_STATE_5X4B 5 +#define SHA256_INIT_STATE_8X4B 8 +#define SHA512_INIT_STATE_8X8B 8 +#define SHA1_INIT_STATE SHA1_INIT_STATE_5X4B +#define SHA224_INIT_STATE SHA256_INIT_STATE_8X4B +#define SHA256_INIT_STATE SHA256_INIT_STATE_8X4B +#define SHA384_INIT_STATE SHA512_INIT_STATE_8X8B +#define SHA512_INIT_STATE SHA512_INIT_STATE_8X8B + +#define DUMMY_BYTES 16 + +#define IPAD_DATA 0x36363636 +#define OPAD_DATA 0x5c5c5c5c + +#define TRANSHDR_SIZE(alignedkctx_len)\ + (sizeof(struct ulptx_idata) +\ + sizeof(struct ulp_txpkt) +\ + sizeof(struct fw_crypto_lookaside_wr) +\ + sizeof(struct cpl_tx_sec_pdu) +\ + (alignedkctx_len)) +#define CIPHER_TRANSHDR_SIZE(alignedkctx_len, sge_pairs) \ + (TRANSHDR_SIZE(alignedkctx_len) + sge_pairs +\ + sizeof(struct cpl_rx_phys_dsgl)) +#define HASH_TRANSHDR_SIZE(alignedkctx_len)\ + (TRANSHDR_SIZE(alignedkctx_len) + DUMMY_BYTES) + +#define SEC_CPL_OFFSET (sizeof(struct fw_crypto_lookaside_wr) + \ + sizeof(struct ulp_txpkt) + \ + sizeof(struct ulptx_idata)) + +#define FILL_SEC_CPL_OP_IVINSR(id, len, hldr, ofst) \ + htonl( \ + CPL_TX_SEC_PDU_OPCODE_V(CPL_TX_SEC_PDU) | \ + CPL_TX_SEC_PDU_RXCHID_V((id)) | \ + CPL_TX_SEC_PDU_ACKFOLLOWS_V(0) | \ + CPL_TX_SEC_PDU_ULPTXLPBK_V(1) | \ + CPL_TX_SEC_PDU_CPLLEN_V((len)) | \ + CPL_TX_SEC_PDU_PLACEHOLDER_V((hldr)) | \ + CPL_TX_SEC_PDU_IVINSRTOFST_V((ofst))) + +#define FILL_SEC_CPL_CIPHERSTOP_HI(a_start, a_stop, c_start, c_stop_hi) \ + htonl( \ + CPL_TX_SEC_PDU_AADSTART_V((a_start)) | \ + CPL_TX_SEC_PDU_AADSTOP_V((a_stop)) | \ + CPL_TX_SEC_PDU_CIPHERSTART_V((c_start)) | \ + CPL_TX_SEC_PDU_CIPHERSTOP_HI_V((c_stop_hi))) + +#define FILL_SEC_CPL_AUTHINSERT(c_stop_lo, a_start, a_stop, a_inst) \ + htonl( \ + CPL_TX_SEC_PDU_CIPHERSTOP_LO_V((c_stop_lo)) | \ + CPL_TX_SEC_PDU_AUTHSTART_V((a_start)) | \ + CPL_TX_SEC_PDU_AUTHSTOP_V((a_stop)) | \ + CPL_TX_SEC_PDU_AUTHINSERT_V((a_inst))) + +#define FILL_SEC_CPL_SCMD0_SEQNO(ctrl, seq, cmode, amode, opad, size, nivs) \ + htonl( \ + SCMD_SEQ_NO_CTRL_V(0) | \ + SCMD_STATUS_PRESENT_V(0) | \ + SCMD_PROTO_VERSION_V(CHCR_SCMD_PROTO_VERSION_GENERIC) | \ + SCMD_ENC_DEC_CTRL_V((ctrl)) | \ + SCMD_CIPH_AUTH_SEQ_CTRL_V((seq)) | \ + SCMD_CIPH_MODE_V((cmode)) | \ + SCMD_AUTH_MODE_V((amode)) | \ + SCMD_HMAC_CTRL_V((opad)) | \ + SCMD_IV_SIZE_V((size)) | \ + SCMD_NUM_IVS_V((nivs))) + +#define FILL_SEC_CPL_IVGEN_HDRLEN(last, more, ctx_in, mac, ivdrop, len) htonl( \ + SCMD_ENB_DBGID_V(0) | \ + SCMD_IV_GEN_CTRL_V(0) | \ + SCMD_LAST_FRAG_V((last)) | \ + SCMD_MORE_FRAGS_V((more)) | \ + SCMD_TLS_COMPPDU_V(0) | \ + SCMD_KEY_CTX_INLINE_V((ctx_in)) | \ + SCMD_TLS_FRAG_ENABLE_V(0) | \ + SCMD_MAC_ONLY_V((mac)) | \ + SCMD_AADIVDROP_V((ivdrop)) | \ + SCMD_HDR_LEN_V((len))) + +#define FILL_KEY_CTX_HDR(ck_size, mk_size, d_ck, opad, ctx_len) \ + htonl(KEY_CONTEXT_VALID_V(1) | \ + KEY_CONTEXT_CK_SIZE_V((ck_size)) | \ + KEY_CONTEXT_MK_SIZE_V(mk_size) | \ + KEY_CONTEXT_DUAL_CK_V((d_ck)) | \ + KEY_CONTEXT_OPAD_PRESENT_V((opad)) | \ + KEY_CONTEXT_SALT_PRESENT_V(1) | \ + KEY_CONTEXT_CTX_LEN_V((ctx_len))) + +#define FILL_WR_OP_CCTX_SIZE(len, ctx_len) \ + htonl( \ + FW_CRYPTO_LOOKASIDE_WR_OPCODE_V( \ + FW_CRYPTO_LOOKASIDE_WR) | \ + FW_CRYPTO_LOOKASIDE_WR_COMPL_V(0) | \ + FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_V((len)) | \ + FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_V(1) | \ + FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_V((ctx_len))) + +#define FILL_WR_RX_Q_ID(cid, qid, wr_iv) \ + htonl( \ + FW_CRYPTO_LOOKASIDE_WR_RX_CHID_V((cid)) | \ + FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_V((qid)) | \ + FW_CRYPTO_LOOKASIDE_WR_LCB_V(0) | \ + 
FW_CRYPTO_LOOKASIDE_WR_IV_V((wr_iv))) + +#define FILL_ULPTX_CMD_DEST(cid) \ + htonl(ULPTX_CMD_V(ULP_TX_PKT) | \ + ULP_TXPKT_DEST_V(0) | \ + ULP_TXPKT_DATAMODIFY_V(0) | \ + ULP_TXPKT_CHANNELID_V((cid)) | \ + ULP_TXPKT_RO_V(1) | \ + ULP_TXPKT_FID_V(0)) + +#define KEYCTX_ALIGN_PAD(bs) ({unsigned int _bs = (bs);\ + _bs == SHA1_DIGEST_SIZE ? 12 : 0; }) + +#define FILL_PLD_SIZE_HASH_SIZE(payload_sgl_len, sgl_lengths, total_frags) \ + htonl(FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(payload_sgl_len ? \ + sgl_lengths[total_frags] : 0) |\ + FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(0)) + +#define FILL_LEN_PKD(calc_tx_flits_ofld, skb) \ + htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP((\ + calc_tx_flits_ofld(skb) * 8), 16))) + +#define FILL_CMD_MORE(immdatalen) htonl(ULPTX_CMD_V(ULP_TX_SC_IMM) |\ + ULP_TX_SC_MORE_V((immdatalen) ? 0 : 1)) + +#define MAX_NK 8 +#define CRYPTO_MAX_IMM_TX_PKT_LEN 256 + +struct algo_param { + unsigned int auth_mode; + unsigned int mk_size; + unsigned int result_size; +}; + +struct hash_wr_param { + unsigned int opad_needed; + unsigned int more; + unsigned int last; + struct algo_param alg_prm; + unsigned int sg_len; + unsigned int bfr_len; + u64 scmd1; +}; + +enum { + AES_KEYLENGTH_128BIT = 128, + AES_KEYLENGTH_192BIT = 192, + AES_KEYLENGTH_256BIT = 256 +}; + +enum { + KEYLENGTH_3BYTES = 3, + KEYLENGTH_4BYTES = 4, + KEYLENGTH_6BYTES = 6, + KEYLENGTH_8BYTES = 8 +}; + +enum { + NUMBER_OF_ROUNDS_10 = 10, + NUMBER_OF_ROUNDS_12 = 12, + NUMBER_OF_ROUNDS_14 = 14, +}; + +/* + * CCM defines values of 4, 6, 8, 10, 12, 14, and 16 octets, + * where they indicate the size of the integrity check value (ICV) + */ +enum { + AES_CCM_ICV_4 = 4, + AES_CCM_ICV_6 = 6, + AES_CCM_ICV_8 = 8, + AES_CCM_ICV_10 = 10, + AES_CCM_ICV_12 = 12, + AES_CCM_ICV_14 = 14, + AES_CCM_ICV_16 = 16 +}; + +struct hash_op_params { + unsigned char mk_size; + unsigned char pad_align; + unsigned char auth_mode; + char hash_name[MAX_HASH_NAME]; + unsigned short block_size; + unsigned short word_size; + unsigned short ipad_size; +}; + +struct phys_sge_pairs { + __be16 len[8]; + __be64 addr[8]; +}; + +struct phys_sge_parm { + unsigned int nents; + unsigned int obsize; + unsigned short qid; + unsigned char align; +}; + +struct crypto_result { + struct completion completion; + int err; +}; + +static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = { + SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, +}; + +static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = { + SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3, + SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7, +}; + +static const u32 sha256_init[SHA256_DIGEST_SIZE / 4] = { + SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3, + SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7, +}; + +static const u64 sha384_init[SHA512_DIGEST_SIZE / 8] = { + SHA384_H0, SHA384_H1, SHA384_H2, SHA384_H3, + SHA384_H4, SHA384_H5, SHA384_H6, SHA384_H7, +}; + +static const u64 sha512_init[SHA512_DIGEST_SIZE / 8] = { + SHA512_H0, SHA512_H1, SHA512_H2, SHA512_H3, + SHA512_H4, SHA512_H5, SHA512_H6, SHA512_H7, +}; + +static inline void copy_hash_init_values(char *key, int digestsize) +{ + u8 i; + __be32 *dkey = (__be32 *)key; + u64 *ldkey = (u64 *)key; + __be64 *sha384 = (__be64 *)sha384_init; + __be64 *sha512 = (__be64 *)sha512_init; + + switch (digestsize) { + case SHA1_DIGEST_SIZE: + for (i = 0; i < SHA1_INIT_STATE; i++) + dkey[i] = cpu_to_be32(sha1_init[i]); + break; + case SHA224_DIGEST_SIZE: + for (i = 0; i < SHA224_INIT_STATE; i++) + dkey[i] = cpu_to_be32(sha224_init[i]); + break; + case SHA256_DIGEST_SIZE: + for (i = 0; i < 
SHA256_INIT_STATE; i++) + dkey[i] = cpu_to_be32(sha256_init[i]); + break; + case SHA384_DIGEST_SIZE: + for (i = 0; i < SHA384_INIT_STATE; i++) + ldkey[i] = be64_to_cpu(sha384[i]); + break; + case SHA512_DIGEST_SIZE: + for (i = 0; i < SHA512_INIT_STATE; i++) + ldkey[i] = be64_to_cpu(sha512[i]); + break; + } +} + +static const u8 sgl_lengths[20] = { + 0, 1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 10, 10, 11, 12, 13, 13, 14, 15 +}; + +/* Number of len fields(8) * size of one addr field */ +#define PHYSDSGL_MAX_LEN_SIZE 16 + +static inline u16 get_space_for_phys_dsgl(unsigned int sgl_entr) +{ + /* len field size + addr field size */ + return ((sgl_entr >> 3) + ((sgl_entr % 8) ? + 1 : 0)) * PHYSDSGL_MAX_LEN_SIZE + + (sgl_entr << 3) + ((sgl_entr % 2 ? 1 : 0) << 3); +} + +/* The AES s-transform matrix (s-box). */ +static const u8 aes_sbox[256] = { + 99, 124, 119, 123, 242, 107, 111, 197, 48, 1, 103, 43, 254, 215, + 171, 118, 202, 130, 201, 125, 250, 89, 71, 240, 173, 212, 162, 175, + 156, 164, 114, 192, 183, 253, 147, 38, 54, 63, 247, 204, 52, 165, + 229, 241, 113, 216, 49, 21, 4, 199, 35, 195, 24, 150, 5, 154, 7, + 18, 128, 226, 235, 39, 178, 117, 9, 131, 44, 26, 27, 110, 90, + 160, 82, 59, 214, 179, 41, 227, 47, 132, 83, 209, 0, 237, 32, + 252, 177, 91, 106, 203, 190, 57, 74, 76, 88, 207, 208, 239, 170, + 251, 67, 77, 51, 133, 69, 249, 2, 127, 80, 60, 159, 168, 81, + 163, 64, 143, 146, 157, 56, 245, 188, 182, 218, 33, 16, 255, 243, + 210, 205, 12, 19, 236, 95, 151, 68, 23, 196, 167, 126, 61, 100, + 93, 25, 115, 96, 129, 79, 220, 34, 42, 144, 136, 70, 238, 184, + 20, 222, 94, 11, 219, 224, 50, 58, 10, 73, 6, 36, 92, 194, + 211, 172, 98, 145, 149, 228, 121, 231, 200, 55, 109, 141, 213, 78, + 169, 108, 86, 244, 234, 101, 122, 174, 8, 186, 120, 37, 46, 28, 166, + 180, 198, 232, 221, 116, 31, 75, 189, 139, 138, 112, 62, 181, 102, + 72, 3, 246, 14, 97, 53, 87, 185, 134, 193, 29, 158, 225, 248, + 152, 17, 105, 217, 142, 148, 155, 30, 135, 233, 206, 85, 40, 223, + 140, 161, 137, 13, 191, 230, 66, 104, 65, 153, 45, 15, 176, 84, + 187, 22 +}; + +static u32 aes_ks_subword(const u32 w) +{ + u8 bytes[4]; + + *(u32 *)(&bytes[0]) = w; + bytes[0] = aes_sbox[bytes[0]]; + bytes[1] = aes_sbox[bytes[1]]; + bytes[2] = aes_sbox[bytes[2]]; + bytes[3] = aes_sbox[bytes[3]]; + return *(u32 *)(&bytes[0]); +} + +static u32 round_constant[11] = { + 0x01000000, 0x02000000, 0x04000000, 0x08000000, + 0x10000000, 0x20000000, 0x40000000, 0x80000000, + 0x1B000000, 0x36000000, 0x6C000000 +}; + +/* dec_key - OUTPUT - Reverse round key + * key - INPUT - key + * keylength - INPUT - length of the key in number of bits + */ +static inline void get_aes_decrypt_key(unsigned char *dec_key, + const unsigned char *key, + unsigned int keylength) +{ + u32 temp; + u32 w_ring[MAX_NK]; + int i, j, k = 0; + u8 nr, nk; + + switch (keylength) { + case AES_KEYLENGTH_128BIT: + nk = KEYLENGTH_4BYTES; + nr = NUMBER_OF_ROUNDS_10; + break; + + case AES_KEYLENGTH_192BIT: + nk = KEYLENGTH_6BYTES; + nr = NUMBER_OF_ROUNDS_12; + break; + case AES_KEYLENGTH_256BIT: + nk = KEYLENGTH_8BYTES; + nr = NUMBER_OF_ROUNDS_14; + break; + default: + return; + } + for (i = 0; i < nk; i++ ) + w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]); + + i = 0; + temp = w_ring[nk - 1]; + while(i + nk < (nr + 1) * 4) { + if(!(i % nk)) { + /* RotWord(temp) */ + temp = (temp << 8) | (temp >> 24); + temp = aes_ks_subword(temp); + temp ^= round_constant[i / nk]; + } + else if (nk == 8 && (i % 4 == 0)) + temp = aes_ks_subword(temp); + w_ring[i % nk] ^= temp; + temp = w_ring[i % nk]; + i++; + } + for 
(k = 0, j = i % nk; k < nk; k++) { + *((u32 *)dec_key + k) = htonl(w_ring[j]); + j--; + if(j < 0) + j += nk; + } +} + +#endif /* __CHCR_ALGO_H__ */ diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c new file mode 100644 index 000000000000..2f6156b672ce --- /dev/null +++ b/drivers/crypto/chelsio/chcr_core.c @@ -0,0 +1,240 @@ +/** + * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux. + * + * Copyright (C) 2011-2016 Chelsio Communications. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + * + * Written and Maintained by: + * Manoj Malviya (manojmalviya@chelsio.com) + * Atul Gupta (atul.gupta@chelsio.com) + * Jitendra Lulla (jlulla@chelsio.com) + * Yeshaswi M R Gowda (yeshaswi@chelsio.com) + * Harsh Jain (harsh@chelsio.com) + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/skbuff.h> + +#include <crypto/aes.h> +#include <crypto/hash.h> + +#include "t4_msg.h" +#include "chcr_core.h" +#include "cxgb4_uld.h" + +static LIST_HEAD(uld_ctx_list); +static DEFINE_MUTEX(dev_mutex); +static atomic_t dev_count; + +typedef int (*chcr_handler_func)(struct chcr_dev *dev, unsigned char *input); +static int cpl_fw6_pld_handler(struct chcr_dev *dev, unsigned char *input); +static void *chcr_uld_add(const struct cxgb4_lld_info *lld); +static int chcr_uld_state_change(void *handle, enum cxgb4_state state); + +static chcr_handler_func work_handlers[NUM_CPL_CMDS] = { + [CPL_FW6_PLD] = cpl_fw6_pld_handler, +}; + +static struct cxgb4_pci_uld_info chcr_uld_info = { + .name = DRV_MODULE_NAME, + .nrxq = 4, + .rxq_size = 1024, + .nciq = 0, + .ciq_size = 0, + .add = chcr_uld_add, + .state_change = chcr_uld_state_change, + .rx_handler = chcr_uld_rx_handler, +}; + +int assign_chcr_device(struct chcr_dev **dev) +{ + struct uld_ctx *u_ctx; + + /* + * Which device to use if multiple devices are available TODO + * May be select the device based on round robin. One session + * must go to the same device to maintain the ordering. + */ + mutex_lock(&dev_mutex); /* TODO ? 
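+	 * For now, dev_mutex serializes this lookup against
+	 * chcr_uld_add() and chcr_dev_remove(), so the device list
+	 * cannot change underneath us.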
*/
+	if (list_empty(&uld_ctx_list)) {
+		mutex_unlock(&dev_mutex);
+		return -ENXIO;
+	}
+	u_ctx = list_first_entry(&uld_ctx_list, struct uld_ctx, entry);
+
+	*dev = u_ctx->dev;
+	mutex_unlock(&dev_mutex);
+	return 0;
+}
+
+static int chcr_dev_add(struct uld_ctx *u_ctx)
+{
+	struct chcr_dev *dev;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	spin_lock_init(&dev->lock_chcr_dev);
+	u_ctx->dev = dev;
+	dev->u_ctx = u_ctx;
+	atomic_inc(&dev_count);
+	return 0;
+}
+
+static int chcr_dev_remove(struct uld_ctx *u_ctx)
+{
+	kfree(u_ctx->dev);
+	u_ctx->dev = NULL;
+	atomic_dec(&dev_count);
+	return 0;
+}
+
+static int cpl_fw6_pld_handler(struct chcr_dev *dev,
+			       unsigned char *input)
+{
+	struct crypto_async_request *req;
+	struct cpl_fw6_pld *fw6_pld;
+	u32 ack_err_status = 0;
+	int error_status = 0;
+
+	fw6_pld = (struct cpl_fw6_pld *)input;
+	req = (struct crypto_async_request *)(uintptr_t)be64_to_cpu(
+		fw6_pld->data[1]);
+
+	ack_err_status =
+		ntohl(*(__be32 *)((unsigned char *)&fw6_pld->data[0] + 4));
+	if (ack_err_status) {
+		if (CHK_MAC_ERR_BIT(ack_err_status) ||
+		    CHK_PAD_ERR_BIT(ack_err_status))
+			error_status = -EINVAL;
+	}
+	/* call the completion callback with the error status */
+	if (req) {
+		if (!chcr_handle_resp(req, input, error_status))
+			req->complete(req, error_status);
+		else
+			return -EINVAL;
+	} else {
+		pr_err("Incorrect request address from the firmware\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+
+int chcr_send_wr(struct sk_buff *skb)
+{
+	return cxgb4_ofld_send(skb->dev, skb);
+}
+
+static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
+{
+	struct uld_ctx *u_ctx;
+
+	/* Create the device and add it in the device list */
+	u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL);
+	if (!u_ctx) {
+		u_ctx = ERR_PTR(-ENOMEM);
+		goto out;
+	}
+	u_ctx->lldi = *lld;
+	mutex_lock(&dev_mutex);
+	list_add_tail(&u_ctx->entry, &uld_ctx_list);
+	mutex_unlock(&dev_mutex);
+out:
+	return u_ctx;
+}
+
+int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
+			const struct pkt_gl *pgl)
+{
+	struct uld_ctx *u_ctx = (struct uld_ctx *)handle;
+	struct chcr_dev *dev = u_ctx->dev;
+	const struct cpl_act_establish *rpl = (struct cpl_act_establish
+					       *)rsp;
+
+	if (rpl->ot.opcode != CPL_FW6_PLD) {
+		pr_err("Unsupported opcode\n");
+		return 0;
+	}
+
+	if (!pgl)
+		work_handlers[rpl->ot.opcode](dev, (unsigned char *)&rsp[1]);
+	else
+		work_handlers[rpl->ot.opcode](dev, pgl->va);
+	return 0;
+}
+
+static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
+{
+	struct uld_ctx *u_ctx = handle;
+	int ret = 0;
+
+	switch (state) {
+	case CXGB4_STATE_UP:
+		if (!u_ctx->dev) {
+			ret = chcr_dev_add(u_ctx);
+			if (ret != 0)
+				return ret;
+		}
+		if (atomic_read(&dev_count) == 1)
+			ret = start_crypto();
+		break;
+
+	case CXGB4_STATE_DETACH:
+		if (u_ctx->dev) {
+			mutex_lock(&dev_mutex);
+			chcr_dev_remove(u_ctx);
+			mutex_unlock(&dev_mutex);
+		}
+		if (!atomic_read(&dev_count))
+			stop_crypto();
+		break;
+
+	case CXGB4_STATE_START_RECOVERY:
+	case CXGB4_STATE_DOWN:
+	default:
+		break;
+	}
+	return ret;
+}
+
+static int __init chcr_crypto_init(void)
+{
+	if (cxgb4_register_pci_uld(CXGB4_PCI_ULD1, &chcr_uld_info)) {
+		pr_err("ULD register fail: No chcr crypto support in cxgb4\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static void __exit chcr_crypto_exit(void)
+{
+	struct uld_ctx *u_ctx, *tmp;
+
+	if (atomic_read(&dev_count))
+		stop_crypto();
+
+	/* Remove all devices from list */
+	mutex_lock(&dev_mutex);
+	list_for_each_entry_safe(u_ctx, tmp, &uld_ctx_list, entry) {
+		if (u_ctx->dev)
+			chcr_dev_remove(u_ctx);
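+		/*
+		 * No list_del() here: the whole list is being torn down
+		 * along with the module, so entries are simply freed.
+		 */
+		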
kfree(u_ctx);
+	}
+	mutex_unlock(&dev_mutex);
+	cxgb4_unregister_pci_uld(CXGB4_PCI_ULD1);
+}
+
+module_init(chcr_crypto_init);
+module_exit(chcr_crypto_exit);
+
+MODULE_DESCRIPTION("Crypto Co-processor for Chelsio Terminator cards.");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Chelsio Communications");
+MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/crypto/chelsio/chcr_core.h b/drivers/crypto/chelsio/chcr_core.h
new file mode 100644
index 000000000000..2a5c671a4232
--- /dev/null
+++ b/drivers/crypto/chelsio/chcr_core.h
@@ -0,0 +1,80 @@
+/*
+ * This file is part of the Chelsio T6 Crypto driver for Linux.
+ *
+ * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef __CHCR_CORE_H__
+#define __CHCR_CORE_H__
+
+#include <crypto/algapi.h>
+#include "t4_hw.h"
+#include "cxgb4.h"
+#include "cxgb4_uld.h"
+
+#define DRV_MODULE_NAME "chcr"
+#define DRV_VERSION "1.0.0.0"
+
+#define MAX_PENDING_REQ_TO_HW 20
+#define CHCR_TEST_RESPONSE_TIMEOUT 1000
+
+#define PAD_ERROR_BIT 1
+#define CHK_PAD_ERR_BIT(x) (((x) >> PAD_ERROR_BIT) & 1)
+
+#define MAC_ERROR_BIT 0
+#define CHK_MAC_ERR_BIT(x) (((x) >> MAC_ERROR_BIT) & 1)
+
+struct uld_ctx;
+
+struct chcr_dev {
+	/* Requests submitted to h/w and waiting for a response. */
+	spinlock_t lock_chcr_dev;
+	struct crypto_queue pending_queue;
+	struct uld_ctx *u_ctx;
+	unsigned char tx_channel_id;
+};
+
+struct uld_ctx {
+	struct list_head entry;
+	struct cxgb4_lld_info lldi;
+	struct chcr_dev *dev;
+};
+
+int assign_chcr_device(struct chcr_dev **dev);
+int chcr_send_wr(struct sk_buff *skb);
+int start_crypto(void);
+int stop_crypto(void);
+int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
+			const struct pkt_gl *pgl);
+int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
+		     int err);
+#endif /* __CHCR_CORE_H__ */
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
new file mode 100644
index 000000000000..d7d75605da8b
--- /dev/null
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -0,0 +1,203 @@
+/*
+ * This file is part of the Chelsio T6 Crypto driver for Linux.
+ *
+ * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef __CHCR_CRYPTO_H__
+#define __CHCR_CRYPTO_H__
+
+/* Define the following if the h/w does not drop the AAD and IV data
+ * before handing back the processed data
+ */
+
+#define CHCR_CRA_PRIORITY 300
+
+#define CHCR_AES_MAX_KEY_LEN  (2 * (AES_MAX_KEY_SIZE)) /* consider xts */
+#define CHCR_MAX_CRYPTO_IV_LEN 16 /* AES IV len */
+
+#define CHCR_MAX_AUTHENC_AES_KEY_LEN 32 /* max aes key length */
+#define CHCR_MAX_AUTHENC_SHA_KEY_LEN 128 /* max sha key length */
+
+#define CHCR_GIVENCRYPT_OP 2
+/* CPL/SCMD parameters */
+
+#define CHCR_ENCRYPT_OP 0
+#define CHCR_DECRYPT_OP 1
+
+#define CHCR_SCMD_SEQ_NO_CTRL_32BIT 1
+#define CHCR_SCMD_SEQ_NO_CTRL_48BIT 2
+#define CHCR_SCMD_SEQ_NO_CTRL_64BIT 3
+
+#define CHCR_SCMD_PROTO_VERSION_GENERIC 4
+
+#define CHCR_SCMD_AUTH_CTRL_AUTH_CIPHER 0
+#define CHCR_SCMD_AUTH_CTRL_CIPHER_AUTH 1
+
+#define CHCR_SCMD_CIPHER_MODE_NOP 0
+#define CHCR_SCMD_CIPHER_MODE_AES_CBC 1
+#define CHCR_SCMD_CIPHER_MODE_GENERIC_AES 4
+#define CHCR_SCMD_CIPHER_MODE_AES_XTS 6
+
+#define CHCR_SCMD_AUTH_MODE_NOP 0
+#define CHCR_SCMD_AUTH_MODE_SHA1 1
+#define CHCR_SCMD_AUTH_MODE_SHA224 2
+#define CHCR_SCMD_AUTH_MODE_SHA256 3
+#define CHCR_SCMD_AUTH_MODE_SHA512_224 5
+#define CHCR_SCMD_AUTH_MODE_SHA512_256 6
+#define CHCR_SCMD_AUTH_MODE_SHA512_384 7
+#define CHCR_SCMD_AUTH_MODE_SHA512_512 8
+
+#define CHCR_SCMD_HMAC_CTRL_NOP 0
+#define CHCR_SCMD_HMAC_CTRL_NO_TRUNC 1
+
+#define CHCR_SCMD_IVGEN_CTRL_HW 0
+#define CHCR_SCMD_IVGEN_CTRL_SW 1
+/* These are not really MAC key sizes; they are the sizes of the
+ * intermediate state of the SHA engine.
+ */
+#define CHCR_KEYCTX_MAC_KEY_SIZE_128 0
+#define CHCR_KEYCTX_MAC_KEY_SIZE_160 1
+#define CHCR_KEYCTX_MAC_KEY_SIZE_192 2
+#define CHCR_KEYCTX_MAC_KEY_SIZE_256 3
+#define CHCR_KEYCTX_MAC_KEY_SIZE_512 4
+#define CHCR_KEYCTX_CIPHER_KEY_SIZE_128 0
+#define CHCR_KEYCTX_CIPHER_KEY_SIZE_192 1
+#define CHCR_KEYCTX_CIPHER_KEY_SIZE_256 2
+#define CHCR_KEYCTX_NO_KEY 15
+
+#define CHCR_CPL_FW4_PLD_IV_OFFSET (5 * 64) /* bytes. flt #5 and #6 */
+#define CHCR_CPL_FW4_PLD_HASH_RESULT_OFFSET (7 * 64) /* bytes. flt #7 */
+#define CHCR_CPL_FW4_PLD_DATA_SIZE (4 * 64) /* bytes.
flt #4 to #7 */
+
+#define KEY_CONTEXT_HDR_SALT_AND_PAD 16
+#define flits_to_bytes(x) ((x) * 8)
+
+#define IV_NOP 0
+#define IV_IMMEDIATE 1
+#define IV_DSGL 2
+
+#define CRYPTO_ALG_SUB_TYPE_MASK 0x0f000000
+#define CRYPTO_ALG_SUB_TYPE_HASH_HMAC 0x01000000
+#define CRYPTO_ALG_TYPE_HMAC (CRYPTO_ALG_TYPE_AHASH |\
+			      CRYPTO_ALG_SUB_TYPE_HASH_HMAC)
+
+#define MAX_SALT 4
+#define MAX_SCRATCH_PAD_SIZE 32
+
+#define CHCR_HASH_MAX_BLOCK_SIZE_64 64
+#define CHCR_HASH_MAX_BLOCK_SIZE_128 128
+
+/* Aligned to 128 bit boundary */
+struct _key_ctx {
+	__be32 ctx_hdr;
+	u8 salt[MAX_SALT];
+	__be64 reserved;
+	unsigned char key[0];
+};
+
+struct ablk_ctx {
+	u8 enc;
+	unsigned int processed_len;
+	__be32 key_ctx_hdr;
+	unsigned int enckey_len;
+	unsigned int dst_nents;
+	struct scatterlist iv_sg;
+	u8 key[CHCR_AES_MAX_KEY_LEN];
+	u8 iv[CHCR_MAX_CRYPTO_IV_LEN];
+	unsigned char ciph_mode;
+};
+
+struct hmac_ctx {
+	struct shash_desc *desc;
+	u8 ipad[CHCR_HASH_MAX_BLOCK_SIZE_128];
+	u8 opad[CHCR_HASH_MAX_BLOCK_SIZE_128];
+};
+
+struct __crypto_ctx {
+	struct hmac_ctx hmacctx[0];
+	struct ablk_ctx ablkctx[0];
+};
+
+struct chcr_context {
+	struct chcr_dev *dev;
+	unsigned char tx_channel_id;
+	struct __crypto_ctx crypto_ctx[0];
+};
+
+struct chcr_ahash_req_ctx {
+	u32 result;
+	char bfr[CHCR_HASH_MAX_BLOCK_SIZE_128];
+	u8 bfr_len;
+	/* DMA the partial hash in it */
+	u8 partial_hash[CHCR_HASH_MAX_DIGEST_SIZE];
+	u64 data_len;  /* Data length processed so far */
+	void *dummy_payload_ptr;
+	/* SKB which is being sent to the hardware for processing */
+	struct sk_buff *skb;
+};
+
+struct chcr_blkcipher_req_ctx {
+	struct sk_buff *skb;
+};
+
+struct chcr_alg_template {
+	u32 type;
+	u32 is_registered;
+	union {
+		struct crypto_alg crypto;
+		struct ahash_alg hash;
+	} alg;
+};
+
+struct chcr_req_ctx {
+	union {
+		struct ahash_request *ahash_req;
+		struct ablkcipher_request *ablk_req;
+	} req;
+	union {
+		struct chcr_ahash_req_ctx *ahash_ctx;
+		struct chcr_blkcipher_req_ctx *ablk_ctx;
+	} ctx;
+};
+
+struct sge_opaque_hdr {
+	void *dev;
+	dma_addr_t addr[MAX_SKB_FRAGS + 1];
+};
+
+typedef struct sk_buff *(*create_wr_t)(struct crypto_async_request *req,
+				       struct chcr_context *ctx,
+				       unsigned short qid,
+				       unsigned short op_type);
+
+#endif /* __CHCR_CRYPTO_H__ */
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index b2b838724a9b..8e6fe13dbec3 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -136,20 +136,6 @@ static int bcm_sf2_sw_get_sset_count(struct dsa_switch *ds)
 	return BCM_SF2_STATS_SIZE;
 }
 
-static const char *bcm_sf2_sw_drv_probe(struct device *dsa_dev,
-					struct device *host_dev, int sw_addr,
-					void **_priv)
-{
-	struct bcm_sf2_priv *priv;
-
-	priv = devm_kzalloc(dsa_dev, sizeof(*priv), GFP_KERNEL);
-	if (!priv)
-		return NULL;
-	*_priv = priv;
-
-	return "Broadcom Starfighter 2";
-}
-
 static void bcm_sf2_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
 {
 	struct bcm_sf2_priv *priv = ds_to_priv(ds);
@@ -1571,34 +1557,95 @@ static int bcm_sf2_sw_vlan_dump(struct dsa_switch *ds, int port,
 
 static int bcm_sf2_sw_setup(struct dsa_switch *ds)
 {
-	const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
 	struct bcm_sf2_priv *priv = ds_to_priv(ds);
-	struct device_node *dn;
-	void __iomem **base;
 	unsigned int port;
+
+	/* Enable all valid ports and disable those unused */
+	for (port = 0; port < priv->hw_params.num_ports; port++) {
+		/* IMP port receives special treatment */
+		if ((1 << port) & ds->enabled_port_mask)
+			bcm_sf2_port_setup(ds, port, NULL);
+		else if (dsa_is_cpu_port(ds,
port)) + bcm_sf2_imp_setup(ds, port); + else + bcm_sf2_port_disable(ds, port, NULL); + } + + bcm_sf2_sw_configure_vlan(ds); + + return 0; +} + +static struct dsa_switch_driver bcm_sf2_switch_driver = { + .tag_protocol = DSA_TAG_PROTO_BRCM, + .setup = bcm_sf2_sw_setup, + .set_addr = bcm_sf2_sw_set_addr, + .get_phy_flags = bcm_sf2_sw_get_phy_flags, + .get_strings = bcm_sf2_sw_get_strings, + .get_ethtool_stats = bcm_sf2_sw_get_ethtool_stats, + .get_sset_count = bcm_sf2_sw_get_sset_count, + .adjust_link = bcm_sf2_sw_adjust_link, + .fixed_link_update = bcm_sf2_sw_fixed_link_update, + .suspend = bcm_sf2_sw_suspend, + .resume = bcm_sf2_sw_resume, + .get_wol = bcm_sf2_sw_get_wol, + .set_wol = bcm_sf2_sw_set_wol, + .port_enable = bcm_sf2_port_setup, + .port_disable = bcm_sf2_port_disable, + .get_eee = bcm_sf2_sw_get_eee, + .set_eee = bcm_sf2_sw_set_eee, + .port_bridge_join = bcm_sf2_sw_br_join, + .port_bridge_leave = bcm_sf2_sw_br_leave, + .port_stp_state_set = bcm_sf2_sw_br_set_stp_state, + .port_fdb_prepare = bcm_sf2_sw_fdb_prepare, + .port_fdb_add = bcm_sf2_sw_fdb_add, + .port_fdb_del = bcm_sf2_sw_fdb_del, + .port_fdb_dump = bcm_sf2_sw_fdb_dump, + .port_vlan_filtering = bcm_sf2_sw_vlan_filtering, + .port_vlan_prepare = bcm_sf2_sw_vlan_prepare, + .port_vlan_add = bcm_sf2_sw_vlan_add, + .port_vlan_del = bcm_sf2_sw_vlan_del, + .port_vlan_dump = bcm_sf2_sw_vlan_dump, +}; + +static int bcm_sf2_sw_probe(struct platform_device *pdev) +{ + const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME; + struct device_node *dn = pdev->dev.of_node; + struct bcm_sf2_priv *priv; + struct dsa_switch *ds; + void __iomem **base; + struct resource *r; unsigned int i; u32 reg, rev; int ret; + ds = devm_kzalloc(&pdev->dev, sizeof(*ds) + sizeof(*priv), GFP_KERNEL); + if (!ds) + return -ENOMEM; + + priv = (struct bcm_sf2_priv *)(ds + 1); + ds->priv = priv; + ds->dev = &pdev->dev; + ds->drv = &bcm_sf2_switch_driver; + + dev_set_drvdata(&pdev->dev, ds); + spin_lock_init(&priv->indir_lock); mutex_init(&priv->stats_mutex); - /* All the interesting properties are at the parent device_node - * level - */ - dn = ds->cd->of_node->parent; - bcm_sf2_identify_ports(priv, ds->cd->of_node); + bcm_sf2_identify_ports(priv, dn->child); priv->irq0 = irq_of_parse_and_map(dn, 0); priv->irq1 = irq_of_parse_and_map(dn, 1); base = &priv->core; for (i = 0; i < BCM_SF2_REGS_NUM; i++) { - *base = of_iomap(dn, i); - if (*base == NULL) { + r = platform_get_resource(pdev, IORESOURCE_MEM, i); + *base = devm_ioremap_resource(&pdev->dev, r); + if (IS_ERR(*base)) { pr_err("unable to find register: %s\n", reg_names[i]); - ret = -ENOMEM; - goto out_unmap; + return PTR_ERR(*base); } base++; } @@ -1606,30 +1653,30 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds) ret = bcm_sf2_sw_rst(priv); if (ret) { pr_err("unable to software reset switch: %d\n", ret); - goto out_unmap; + return ret; } ret = bcm_sf2_mdio_register(ds); if (ret) { pr_err("failed to register MDIO bus\n"); - goto out_unmap; + return ret; } /* Disable all interrupts and request them */ bcm_sf2_intr_disable(priv); - ret = request_irq(priv->irq0, bcm_sf2_switch_0_isr, 0, - "switch_0", priv); + ret = devm_request_irq(&pdev->dev, priv->irq0, bcm_sf2_switch_0_isr, 0, + "switch_0", priv); if (ret < 0) { pr_err("failed to request switch_0 IRQ\n"); goto out_mdio; } - ret = request_irq(priv->irq1, bcm_sf2_switch_1_isr, 0, - "switch_1", priv); + ret = devm_request_irq(&pdev->dev, priv->irq1, bcm_sf2_switch_1_isr, 0, + "switch_1", priv); if (ret < 0) { pr_err("failed to request switch_1 IRQ\n"); 
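+		/* both IRQs are devm-managed now, so there is no
+		 * free_irq() unwind to do on this error path
+		 */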
- goto out_free_irq0; + goto out_mdio; } /* Reset the MIB counters */ @@ -1649,19 +1696,6 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds) &priv->hw_params.num_gphy)) priv->hw_params.num_gphy = 1; - /* Enable all valid ports and disable those unused */ - for (port = 0; port < priv->hw_params.num_ports; port++) { - /* IMP port receives special treatment */ - if ((1 << port) & ds->enabled_port_mask) - bcm_sf2_port_setup(ds, port, NULL); - else if (dsa_is_cpu_port(ds, port)) - bcm_sf2_imp_setup(ds, port); - else - bcm_sf2_port_disable(ds, port, NULL); - } - - bcm_sf2_sw_configure_vlan(ds); - rev = reg_readl(priv, REG_SWITCH_REVISION); priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) & SWITCH_TOP_REV_MASK; @@ -1670,6 +1704,10 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds) rev = reg_readl(priv, REG_PHY_REVISION); priv->hw_params.gphy_rev = rev & PHY_REVISION_MASK; + ret = dsa_register_switch(ds, dn); + if (ret) + goto out_mdio; + pr_info("Starfighter 2 top: %x.%02x, core: %x.%02x base: 0x%p, IRQs: %d, %d\n", priv->hw_params.top_rev >> 8, priv->hw_params.top_rev & 0xff, priv->hw_params.core_rev >> 8, priv->hw_params.core_rev & 0xff, @@ -1677,66 +1715,61 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds) return 0; -out_free_irq0: - free_irq(priv->irq0, priv); out_mdio: bcm_sf2_mdio_unregister(priv); -out_unmap: - base = &priv->core; - for (i = 0; i < BCM_SF2_REGS_NUM; i++) { - if (*base) - iounmap(*base); - base++; - } return ret; } -static struct dsa_switch_driver bcm_sf2_switch_driver = { - .tag_protocol = DSA_TAG_PROTO_BRCM, - .probe = bcm_sf2_sw_drv_probe, - .setup = bcm_sf2_sw_setup, - .set_addr = bcm_sf2_sw_set_addr, - .get_phy_flags = bcm_sf2_sw_get_phy_flags, - .get_strings = bcm_sf2_sw_get_strings, - .get_ethtool_stats = bcm_sf2_sw_get_ethtool_stats, - .get_sset_count = bcm_sf2_sw_get_sset_count, - .adjust_link = bcm_sf2_sw_adjust_link, - .fixed_link_update = bcm_sf2_sw_fixed_link_update, - .suspend = bcm_sf2_sw_suspend, - .resume = bcm_sf2_sw_resume, - .get_wol = bcm_sf2_sw_get_wol, - .set_wol = bcm_sf2_sw_set_wol, - .port_enable = bcm_sf2_port_setup, - .port_disable = bcm_sf2_port_disable, - .get_eee = bcm_sf2_sw_get_eee, - .set_eee = bcm_sf2_sw_set_eee, - .port_bridge_join = bcm_sf2_sw_br_join, - .port_bridge_leave = bcm_sf2_sw_br_leave, - .port_stp_state_set = bcm_sf2_sw_br_set_stp_state, - .port_fdb_prepare = bcm_sf2_sw_fdb_prepare, - .port_fdb_add = bcm_sf2_sw_fdb_add, - .port_fdb_del = bcm_sf2_sw_fdb_del, - .port_fdb_dump = bcm_sf2_sw_fdb_dump, - .port_vlan_filtering = bcm_sf2_sw_vlan_filtering, - .port_vlan_prepare = bcm_sf2_sw_vlan_prepare, - .port_vlan_add = bcm_sf2_sw_vlan_add, - .port_vlan_del = bcm_sf2_sw_vlan_del, - .port_vlan_dump = bcm_sf2_sw_vlan_dump, -}; - -static int __init bcm_sf2_init(void) +static int bcm_sf2_sw_remove(struct platform_device *pdev) { - register_switch_driver(&bcm_sf2_switch_driver); + struct dsa_switch *ds = platform_get_drvdata(pdev); + struct bcm_sf2_priv *priv = ds_to_priv(ds); + + /* Disable all ports and interrupts */ + priv->wol_ports_mask = 0; + bcm_sf2_sw_suspend(ds); + dsa_unregister_switch(ds); + bcm_sf2_mdio_unregister(priv); return 0; } -module_init(bcm_sf2_init); -static void __exit bcm_sf2_exit(void) +#ifdef CONFIG_PM_SLEEP +static int bcm_sf2_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct dsa_switch *ds = platform_get_drvdata(pdev); + + return dsa_switch_suspend(ds); +} + +static int bcm_sf2_resume(struct device *dev) { - 
unregister_switch_driver(&bcm_sf2_switch_driver); + struct platform_device *pdev = to_platform_device(dev); + struct dsa_switch *ds = platform_get_drvdata(pdev); + + return dsa_switch_resume(ds); } -module_exit(bcm_sf2_exit); +#endif /* CONFIG_PM_SLEEP */ + +static SIMPLE_DEV_PM_OPS(bcm_sf2_pm_ops, + bcm_sf2_suspend, bcm_sf2_resume); + +static const struct of_device_id bcm_sf2_of_match[] = { + { .compatible = "brcm,bcm7445-switch-v4.0" }, + { /* sentinel */ }, +}; + +static struct platform_driver bcm_sf2_driver = { + .probe = bcm_sf2_sw_probe, + .remove = bcm_sf2_sw_remove, + .driver = { + .name = "brcm-sf2", + .of_match_table = bcm_sf2_of_match, + .pm = &bcm_sf2_pm_ops, + }, +}; +module_platform_driver(bcm_sf2_driver); MODULE_AUTHOR("Broadcom Corporation"); MODULE_DESCRIPTION("Driver for Broadcom Starfighter 2 ethernet switch chip"); diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index a230fcba5b64..014b52bd72f1 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -309,9 +309,9 @@ static int mv88e6xxx_serdes_write(struct mv88e6xxx_chip *chip, int reg, u16 val) static int mv88e6xxx_wait(struct mv88e6xxx_chip *chip, int addr, int reg, u16 mask) { - unsigned long timeout = jiffies + HZ / 10; + int i; - while (time_before(jiffies, timeout)) { + for (i = 0; i < 16; i++) { u16 val; int err; @@ -325,6 +325,7 @@ static int mv88e6xxx_wait(struct mv88e6xxx_chip *chip, int addr, int reg, usleep_range(1000, 2000); } + dev_err(chip->dev, "Timeout while waiting for switch\n"); return -ETIMEDOUT; } @@ -333,20 +334,12 @@ static int mv88e6xxx_update(struct mv88e6xxx_chip *chip, int addr, int reg, u16 update) { u16 val; - int i, err; + int err; /* Wait until the previous operation is completed */ - for (i = 0; i < 16; ++i) { - err = mv88e6xxx_read(chip, addr, reg, &val); - if (err) - return err; - - if (!(val & BIT(15))) - break; - } - - if (i == 16) - return -ETIMEDOUT; + err = mv88e6xxx_wait(chip, addr, reg, BIT(15)); + if (err) + return err; /* Set the Update bit to trigger a write operation */ val = BIT(15) | update; @@ -375,7 +368,7 @@ static int _mv88e6xxx_reg_write(struct mv88e6xxx_chip *chip, int addr, static int mv88e6xxx_ppu_disable(struct mv88e6xxx_chip *chip) { int ret; - unsigned long timeout; + int i; ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_CONTROL); if (ret < 0) @@ -386,8 +379,7 @@ static int mv88e6xxx_ppu_disable(struct mv88e6xxx_chip *chip) if (ret) return ret; - timeout = jiffies + 1 * HZ; - while (time_before(jiffies, timeout)) { + for (i = 0; i < 16; i++) { ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_STATUS); if (ret < 0) return ret; @@ -403,8 +395,7 @@ static int mv88e6xxx_ppu_disable(struct mv88e6xxx_chip *chip) static int mv88e6xxx_ppu_enable(struct mv88e6xxx_chip *chip) { - int ret, err; - unsigned long timeout; + int ret, err, i; ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_CONTROL); if (ret < 0) @@ -415,8 +406,7 @@ static int mv88e6xxx_ppu_enable(struct mv88e6xxx_chip *chip) if (err) return err; - timeout = jiffies + 1 * HZ; - while (time_before(jiffies, timeout)) { + for (i = 0; i < 16; i++) { ret = _mv88e6xxx_reg_read(chip, REG_GLOBAL, GLOBAL_STATUS); if (ret < 0) return ret; diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c index 9a9745c4047c..3bc0a04df107 100644 --- a/drivers/net/ethernet/broadcom/bgmac-bcma.c +++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c @@ -92,6 +92,7 @@ MODULE_DEVICE_TABLE(bcma, bgmac_bcma_tbl); /* 
http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipattach */ static int bgmac_probe(struct bcma_device *core) { + struct bcma_chipinfo *ci = &core->bus->chipinfo; struct ssb_sprom *sprom = &core->bus->sprom; struct mii_bus *mii_bus; struct bgmac *bgmac; @@ -157,7 +158,8 @@ static int bgmac_probe(struct bcma_device *core) dev_info(bgmac->dev, "Found PHY addr: %d%s\n", bgmac->phyaddr, bgmac->phyaddr == BGMAC_PHY_NOREGS ? " (NOREGS)" : ""); - if (!bgmac_is_bcm4707_family(core)) { + if (!bgmac_is_bcm4707_family(core) && + !(ci->id == BCMA_CHIP_ID_BCM53573 && core->core_unit == 1)) { mii_bus = bcma_mdio_mii_register(core, bgmac->phyaddr); if (!IS_ERR(mii_bus)) { err = PTR_ERR(mii_bus); @@ -230,6 +232,21 @@ static int bgmac_probe(struct bcma_device *core) bgmac->feature_flags |= BGMAC_FEAT_NO_RESET; bgmac->feature_flags |= BGMAC_FEAT_FORCE_SPEED_2500; break; + case BCMA_CHIP_ID_BCM53573: + bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST; + bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK; + if (ci->pkg == BCMA_PKG_ID_BCM47189) + bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED; + if (core->core_unit == 0) { + bgmac->feature_flags |= BGMAC_FEAT_CC4_IF_SW_TYPE; + if (ci->pkg == BCMA_PKG_ID_BCM47189) + bgmac->feature_flags |= + BGMAC_FEAT_CC4_IF_SW_TYPE_RGMII; + } else if (core->core_unit == 1) { + bgmac->feature_flags |= BGMAC_FEAT_IRQ_ID_OOB_6; + bgmac->feature_flags |= BGMAC_FEAT_CC7_IF_TYPE_RGMII; + } + break; default: bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST; bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK; diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index c4751ece76f6..6ea0e5ff1e44 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -932,7 +932,8 @@ static void bgmac_chip_reset(struct bgmac *bgmac) et_swtype <<= 4; sw_type = et_swtype; } else if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_EPHYRMII) { - sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII; + sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RMII | + BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII; } else if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_RGMII) { sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RGMII | BGMAC_CHIPCTL_1_SW_TYPE_RGMII; @@ -940,6 +941,27 @@ static void bgmac_chip_reset(struct bgmac *bgmac) bgmac_cco_ctl_maskset(bgmac, 1, ~(BGMAC_CHIPCTL_1_IF_TYPE_MASK | BGMAC_CHIPCTL_1_SW_TYPE_MASK), sw_type); + } else if (bgmac->feature_flags & BGMAC_FEAT_CC4_IF_SW_TYPE) { + u32 sw_type = BGMAC_CHIPCTL_4_IF_TYPE_MII | + BGMAC_CHIPCTL_4_SW_TYPE_EPHY; + u8 et_swtype = 0; + char buf[4]; + + if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) { + if (kstrtou8(buf, 0, &et_swtype)) + dev_err(bgmac->dev, "Failed to parse et_swtype (%s)\n", + buf); + sw_type = (et_swtype & 0x0f) << 12; + } else if (bgmac->feature_flags & BGMAC_FEAT_CC4_IF_SW_TYPE_RGMII) { + sw_type = BGMAC_CHIPCTL_4_IF_TYPE_RGMII | + BGMAC_CHIPCTL_4_SW_TYPE_RGMII; + } + bgmac_cco_ctl_maskset(bgmac, 4, ~(BGMAC_CHIPCTL_4_IF_TYPE_MASK | + BGMAC_CHIPCTL_4_SW_TYPE_MASK), + sw_type); + } else if (bgmac->feature_flags & BGMAC_FEAT_CC7_IF_TYPE_RGMII) { + bgmac_cco_ctl_maskset(bgmac, 7, ~BGMAC_CHIPCTL_7_IF_TYPE_MASK, + BGMAC_CHIPCTL_7_IF_TYPE_RGMII); } if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw) @@ -1467,6 +1489,10 @@ int bgmac_enet_probe(struct bgmac *info) */ bgmac_clk_enable(bgmac, 0); + /* This seems to be fixing IRQ by assigning OOB #6 to the core */ + if (bgmac->feature_flags & BGMAC_FEAT_IRQ_ID_OOB_6) + bgmac_idm_write(bgmac, BCMA_OOB_SEL_OUT_A30, 0x86); + bgmac_chip_reset(bgmac); err = 
bgmac_dma_alloc(bgmac); diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h index 24a250267b88..80836b4c9f38 100644 --- a/drivers/net/ethernet/broadcom/bgmac.h +++ b/drivers/net/ethernet/broadcom/bgmac.h @@ -369,6 +369,21 @@ #define BGMAC_CHIPCTL_1_SW_TYPE_RGMII 0x000000C0 #define BGMAC_CHIPCTL_1_RXC_DLL_BYPASS 0x00010000 +#define BGMAC_CHIPCTL_4_IF_TYPE_MASK 0x00003000 +#define BGMAC_CHIPCTL_4_IF_TYPE_RMII 0x00000000 +#define BGMAC_CHIPCTL_4_IF_TYPE_MII 0x00001000 +#define BGMAC_CHIPCTL_4_IF_TYPE_RGMII 0x00002000 +#define BGMAC_CHIPCTL_4_SW_TYPE_MASK 0x0000C000 +#define BGMAC_CHIPCTL_4_SW_TYPE_EPHY 0x00000000 +#define BGMAC_CHIPCTL_4_SW_TYPE_EPHYMII 0x00004000 +#define BGMAC_CHIPCTL_4_SW_TYPE_EPHYRMII 0x00008000 +#define BGMAC_CHIPCTL_4_SW_TYPE_RGMII 0x0000C000 + +#define BGMAC_CHIPCTL_7_IF_TYPE_MASK 0x000000C0 +#define BGMAC_CHIPCTL_7_IF_TYPE_RMII 0x00000000 +#define BGMAC_CHIPCTL_7_IF_TYPE_MII 0x00000040 +#define BGMAC_CHIPCTL_7_IF_TYPE_RGMII 0x00000080 + #define BGMAC_WEIGHT 64 #define ETHER_MAX_LEN 1518 @@ -390,6 +405,10 @@ #define BGMAC_FEAT_NO_CLR_MIB BIT(13) #define BGMAC_FEAT_FORCE_SPEED_2500 BIT(14) #define BGMAC_FEAT_CMDCFG_SR_REV4 BIT(15) +#define BGMAC_FEAT_IRQ_ID_OOB_6 BIT(16) +#define BGMAC_FEAT_CC4_IF_SW_TYPE BIT(17) +#define BGMAC_FEAT_CC4_IF_SW_TYPE_RGMII BIT(18) +#define BGMAC_FEAT_CC7_IF_TYPE_RGMII BIT(19) struct bgmac_slot_info { union { diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile index ace0ab98d0f1..fac2157faf69 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/Makefile +++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile @@ -4,7 +4,7 @@ obj-$(CONFIG_CHELSIO_T4) += cxgb4.o -cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o +cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o cxgb4_uld.o cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index bcfa51226b46..6b0528913687 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -53,6 +53,8 @@ #include "cxgb4_uld.h" #define CH_WARN(adap, fmt, ...) 
dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__) +extern struct list_head adapter_list; +extern struct mutex uld_mutex; enum { MAX_NPORTS = 4, /* max # of ports */ @@ -338,6 +340,7 @@ struct adapter_params { enum chip_type chip; /* chip code */ struct arch_specific_params arch; /* chip specific params */ unsigned char offload; + unsigned char crypto; /* HW capability for crypto */ unsigned char bypass; @@ -403,7 +406,6 @@ struct fw_info { struct fw_hdr fw_hdr; }; - struct trace_params { u32 data[TRACE_LEN / 4]; u32 mask[TRACE_LEN / 4]; @@ -510,6 +512,10 @@ enum { /* adapter flags */ FW_OFLD_CONN = (1 << 9), }; +enum { + ULP_CRYPTO_LOOKASIDE = 1 << 0, +}; + struct rx_sw_desc; struct sge_fl { /* SGE free-buffer queue state */ @@ -680,6 +686,16 @@ struct sge_ctrl_txq { /* state for an SGE control Tx queue */ u8 full; /* the Tx ring is full */ } ____cacheline_aligned_in_smp; +struct sge_uld_rxq_info { + char name[IFNAMSIZ]; /* name of ULD driver */ + struct sge_ofld_rxq *uldrxq; /* Rxq's for ULD */ + u16 *msix_tbl; /* msix_tbl for uld */ + u16 *rspq_id; /* response queue id's of rxq */ + u16 nrxq; /* # of ingress uld queues */ + u16 nciq; /* # of completion queues */ + u8 uld; /* uld type */ +}; + struct sge { struct sge_eth_txq ethtxq[MAX_ETH_QSETS]; struct sge_ofld_txq ofldtxq[MAX_OFLD_QSETS]; @@ -691,6 +707,7 @@ struct sge { struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES]; struct sge_ofld_rxq rdmaciq[MAX_RDMA_CIQS]; struct sge_rspq fw_evtq ____cacheline_aligned_in_smp; + struct sge_uld_rxq_info **uld_rxq_info; struct sge_rspq intrq ____cacheline_aligned_in_smp; spinlock_t intrq_lock; @@ -702,6 +719,7 @@ struct sge { u16 niscsitq; /* # of available iSCST Rx queues */ u16 rdmaqs; /* # of available RDMA Rx queues */ u16 rdmaciqs; /* # of available RDMA concentrator IQs */ + u16 nqs_per_uld; /* # of Rx queues per ULD */ u16 iscsi_rxq[MAX_OFLD_QSETS]; u16 iscsit_rxq[MAX_ISCSIT_QUEUES]; u16 rdma_rxq[MAX_RDMA_QUEUES]; @@ -757,6 +775,17 @@ struct hash_mac_addr { u8 addr[ETH_ALEN]; }; +struct uld_msix_bmap { + unsigned long *msix_bmap; + unsigned int mapsize; + spinlock_t lock; /* lock for acquiring bitmap */ +}; + +struct uld_msix_info { + unsigned short vec; + char desc[IFNAMSIZ + 10]; +}; + struct adapter { void __iomem *regs; void __iomem *bar2; @@ -779,6 +808,9 @@ struct adapter { unsigned short vec; char desc[IFNAMSIZ + 10]; } msix_info[MAX_INGQ + 1]; + struct uld_msix_info *msix_info_ulds; /* msix info for uld's */ + struct uld_msix_bmap msix_bmap_ulds; /* msix bitmap for all uld */ + unsigned int msi_idx; struct doorbell_stats db_stats; struct sge sge; @@ -793,7 +825,9 @@ struct adapter { unsigned int clipt_start; unsigned int clipt_end; struct clip_tbl *clipt; + struct cxgb4_pci_uld_info *uld; void *uld_handle[CXGB4_ULD_MAX]; + unsigned int num_uld; struct list_head list_node; struct list_head rcu_node; struct list_head mac_hlist; /* list of MAC addresses in MPS Hash */ @@ -952,6 +986,11 @@ static inline int is_offload(const struct adapter *adap) return adap->params.offload; } +static inline int is_pci_uld(const struct adapter *adap) +{ + return adap->params.crypto; +} + static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr) { return readl(adap->regs + reg_addr); @@ -1185,8 +1224,6 @@ int t4_sge_init(struct adapter *adap); void t4_sge_start(struct adapter *adap); void t4_sge_stop(struct adapter *adap); int cxgb_busy_poll(struct napi_struct *napi); -int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us, - unsigned int cnt); void cxgb4_set_ethtool_ops(struct net_device 
*netdev); int cxgb4_write_rss(const struct port_info *pi, const u16 *queues); extern int dbfifo_int_thresh; @@ -1289,6 +1326,18 @@ static inline int hash_mac_addr(const u8 *addr) return a & 0x3f; } +int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us, + unsigned int cnt); +static inline void init_rspq(struct adapter *adap, struct sge_rspq *q, + unsigned int us, unsigned int cnt, + unsigned int size, unsigned int iqe_size) +{ + q->adap = adap; + cxgb4_set_rspq_intr_params(q, us, cnt); + q->iqe_len = iqe_size; + q->size = size; +} + void t4_write_indirect(struct adapter *adap, unsigned int addr_reg, unsigned int data_reg, const u32 *vals, unsigned int nregs, unsigned int start_idx); @@ -1523,5 +1572,7 @@ void t4_idma_monitor(struct adapter *adapter, int hz, int ticks); int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf, unsigned int naddr, u8 *addr); - +void uld_mem_free(struct adapter *adap); +int uld_mem_alloc(struct adapter *adap); +void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, struct sge_fl *fl); #endif /* __CXGB4_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 2bb804c93688..85e30f19e97a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -223,8 +223,8 @@ MODULE_PARM_DESC(select_queue, static struct dentry *cxgb4_debugfs_root; -static LIST_HEAD(adapter_list); -static DEFINE_MUTEX(uld_mutex); +LIST_HEAD(adapter_list); +DEFINE_MUTEX(uld_mutex); /* Adapter list to be accessed from atomic context */ static LIST_HEAD(adap_rcu_list); static DEFINE_SPINLOCK(adap_rcu_lock); @@ -1066,20 +1066,20 @@ static int alloc_ofld_rxqs(struct adapter *adap, struct sge_ofld_rxq *q, */ static int setup_sge_queues(struct adapter *adap) { - int err, msi_idx, i, j; + int err, i, j; struct sge *s = &adap->sge; bitmap_zero(s->starving_fl, s->egr_sz); bitmap_zero(s->txq_maperr, s->egr_sz); if (adap->flags & USING_MSIX) - msi_idx = 1; /* vector 0 is for non-queue interrupts */ + adap->msi_idx = 1; /* vector 0 is for non-queue interrupts */ else { err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0, NULL, NULL, NULL, -1); if (err) return err; - msi_idx = -((int)s->intrq.abs_id + 1); + adap->msi_idx = -((int)s->intrq.abs_id + 1); } /* NOTE: If you add/delete any Ingress/Egress Queue allocations in here, @@ -1096,7 +1096,7 @@ static int setup_sge_queues(struct adapter *adap) * new/deleted queues. 
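+	 * (Note: msi_idx was moved into struct adapter so that the ULD
+	 * rx queues allocated later from cxgb4_uld.c keep numbering the
+	 * same MSI-X vector space.)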
*/ err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0], - msi_idx, NULL, fwevtq_handler, NULL, -1); + adap->msi_idx, NULL, fwevtq_handler, NULL, -1); if (err) { freeout: t4_free_sge_resources(adap); return err; @@ -1109,10 +1109,10 @@ freeout: t4_free_sge_resources(adap); struct sge_eth_txq *t = &s->ethtxq[pi->first_qset]; for (j = 0; j < pi->nqsets; j++, q++) { - if (msi_idx > 0) - msi_idx++; + if (adap->msi_idx > 0) + adap->msi_idx++; err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, - msi_idx, &q->fl, + adap->msi_idx, &q->fl, t4_ethrx_handler, NULL, t4_get_mps_bg_map(adap, @@ -1141,11 +1141,11 @@ freeout: t4_free_sge_resources(adap); } #define ALLOC_OFLD_RXQS(firstq, nq, per_chan, ids, lro) do { \ - err = alloc_ofld_rxqs(adap, firstq, nq, per_chan, msi_idx, ids, lro); \ + err = alloc_ofld_rxqs(adap, firstq, nq, per_chan, adap->msi_idx, ids, lro); \ if (err) \ goto freeout; \ - if (msi_idx > 0) \ - msi_idx += nq; \ + if (adap->msi_idx > 0) \ + adap->msi_idx += nq; \ } while (0) ALLOC_OFLD_RXQS(s->iscsirxq, s->iscsiqsets, j, s->iscsi_rxq, false); @@ -2565,6 +2565,12 @@ static void detach_ulds(struct adapter *adap) CXGB4_STATE_DETACH); adap->uld_handle[i] = NULL; } + for (i = 0; i < CXGB4_PCI_ULD_MAX; i++) + if (adap->uld && adap->uld[i].handle) { + adap->uld[i].state_change(adap->uld[i].handle, + CXGB4_STATE_DETACH); + adap->uld[i].handle = NULL; + } if (netevent_registered && list_empty(&adapter_list)) { unregister_netevent_notifier(&cxgb4_netevent_nb); netevent_registered = false; @@ -2584,6 +2590,10 @@ static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state) for (i = 0; i < CXGB4_ULD_MAX; i++) if (adap->uld_handle[i]) ulds[i].state_change(adap->uld_handle[i], new_state); + for (i = 0; i < CXGB4_PCI_ULD_MAX; i++) + if (adap->uld && adap->uld[i].handle) + adap->uld[i].state_change(adap->uld[i].handle, + new_state); mutex_unlock(&uld_mutex); } @@ -4170,6 +4180,11 @@ static int adap_init0(struct adapter *adap) adap->vres.iscsi.start = val[0]; adap->vres.iscsi.size = val[1] - val[0] + 1; } + if (caps_cmd.cryptocaps) { + /* Should query params here...TODO */ + adap->params.crypto |= ULP_CRYPTO_LOOKASIDE; + adap->num_uld += 1; + } #undef FW_PARAM_PFVF #undef FW_PARAM_DEV @@ -4351,16 +4366,6 @@ static inline bool is_x_10g_port(const struct link_config *lc) (lc->supported & FW_PORT_CAP_SPEED_40G) != 0; } -static inline void init_rspq(struct adapter *adap, struct sge_rspq *q, - unsigned int us, unsigned int cnt, - unsigned int size, unsigned int iqe_size) -{ - q->adap = adap; - cxgb4_set_rspq_intr_params(q, us, cnt); - q->iqe_len = iqe_size; - q->size = size; -} - /* * Perform default configuration of DMA queues depending on the number and type * of ports we found and the number of available CPUs. Most settings can be @@ -4375,6 +4380,15 @@ static void cfg_queues(struct adapter *adap) #endif int ciq_size; + /* Reduce memory usage in kdump environment, disable all offload. + */ + if (is_kdump_kernel()) { + adap->params.offload = 0; + adap->params.crypto = 0; + } else if (adap->num_uld && uld_mem_alloc(adap)) { + adap->params.crypto = 0; + } + for_each_port(adap, i) n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg); #ifdef CONFIG_CHELSIO_T4_DCB @@ -4405,11 +4419,6 @@ static void cfg_queues(struct adapter *adap) if (q10g > netif_get_num_default_rss_queues()) q10g = netif_get_num_default_rss_queues(); - /* Reduce memory usage in kdump environment, disable all offload. 
- */ - if (is_kdump_kernel()) - adap->params.offload = 0; - for_each_port(adap, i) { struct port_info *pi = adap2pinfo(adap, i); @@ -4538,23 +4547,58 @@ static void reduce_ethqs(struct adapter *adap, int n) } } +static int get_msix_info(struct adapter *adap) +{ + struct uld_msix_info *msix_info; + int max_ingq = (MAX_OFLD_QSETS * adap->num_uld); + + msix_info = kcalloc(max_ingq, sizeof(*msix_info), GFP_KERNEL); + if (!msix_info) + return -ENOMEM; + + adap->msix_bmap_ulds.msix_bmap = kcalloc(BITS_TO_LONGS(max_ingq), + sizeof(long), GFP_KERNEL); + if (!adap->msix_bmap_ulds.msix_bmap) { + kfree(msix_info); + return -ENOMEM; + } + spin_lock_init(&adap->msix_bmap_ulds.lock); + adap->msix_info_ulds = msix_info; + return 0; +} + +static void free_msix_info(struct adapter *adap) +{ + if (!adap->num_uld) + return; + + kfree(adap->msix_info_ulds); + kfree(adap->msix_bmap_ulds.msix_bmap); +} + /* 2 MSI-X vectors needed for the FW queue and non-data interrupts */ #define EXTRA_VECS 2 static int enable_msix(struct adapter *adap) { - int ofld_need = 0; - int i, want, need, allocated; + int ofld_need = 0, uld_need = 0; + int i, j, want, need, allocated; struct sge *s = &adap->sge; unsigned int nchan = adap->params.nports; struct msix_entry *entries; + int max_ingq = MAX_INGQ; - entries = kmalloc(sizeof(*entries) * (MAX_INGQ + 1), + max_ingq += (MAX_OFLD_QSETS * adap->num_uld); + entries = kmalloc(sizeof(*entries) * (max_ingq + 1), GFP_KERNEL); if (!entries) return -ENOMEM; - for (i = 0; i < MAX_INGQ + 1; ++i) + /* map for msix */ + if (is_pci_uld(adap) && get_msix_info(adap)) + adap->params.crypto = 0; + + for (i = 0; i < max_ingq + 1; ++i) entries[i].entry = i; want = s->max_ethqsets + EXTRA_VECS; @@ -4567,13 +4611,17 @@ static int enable_msix(struct adapter *adap) else ofld_need = 4 * nchan; } + if (is_pci_uld(adap)) { + want += netif_get_num_default_rss_queues() * nchan; + uld_need = nchan; + } #ifdef CONFIG_CHELSIO_T4_DCB /* For Data Center Bridging we need 8 Ethernet TX Priority Queues for * each port. */ - need = 8 * adap->params.nports + EXTRA_VECS + ofld_need; + need = 8 * adap->params.nports + EXTRA_VECS + ofld_need + uld_need; #else - need = adap->params.nports + EXTRA_VECS + ofld_need; + need = adap->params.nports + EXTRA_VECS + ofld_need + uld_need; #endif allocated = pci_enable_msix_range(adap->pdev, entries, need, want); if (allocated < 0) { @@ -4587,12 +4635,20 @@ static int enable_msix(struct adapter *adap) * Every group gets its minimum requirement and NIC gets top * priority for leftovers. 
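+	 * (uld_need vectors were requested up front for the crypto ULD;
+	 * nqs_per_uld below is then sized from what was actually granted.)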
*/ - i = allocated - EXTRA_VECS - ofld_need; + i = allocated - EXTRA_VECS - ofld_need - uld_need; if (i < s->max_ethqsets) { s->max_ethqsets = i; if (i < s->ethqsets) reduce_ethqs(adap, i); } + if (is_pci_uld(adap)) { + if (allocated < want) + s->nqs_per_uld = nchan; + else + s->nqs_per_uld = netif_get_num_default_rss_queues() * + nchan; + } + if (is_offload(adap)) { if (allocated < want) { s->rdmaqs = nchan; @@ -4604,16 +4660,24 @@ static int enable_msix(struct adapter *adap) /* leftovers go to OFLD */ i = allocated - EXTRA_VECS - s->max_ethqsets - - s->rdmaqs - s->rdmaciqs - s->niscsitq; + s->rdmaqs - s->rdmaciqs - s->niscsitq; + if (is_pci_uld(adap)) + i -= s->nqs_per_uld * adap->num_uld; s->iscsiqsets = (i / nchan) * nchan; /* round down */ } - for (i = 0; i < allocated; ++i) + + for (i = 0; i < (allocated - (s->nqs_per_uld * adap->num_uld)); ++i) adap->msix_info[i].vec = entries[i].vector; + if (is_pci_uld(adap)) { + for (j = 0 ; i < allocated; ++i, j++) + adap->msix_info_ulds[j].vec = entries[i].vector; + adap->msix_bmap_ulds.mapsize = j; + } dev_info(adap->pdev_dev, "%d MSI-X vectors allocated, " - "nic %d iscsi %d rdma cpl %d rdma ciq %d\n", + "nic %d iscsi %d rdma cpl %d rdma ciq %d uld %d\n", allocated, s->max_ethqsets, s->iscsiqsets, s->rdmaqs, - s->rdmaciqs); + s->rdmaciqs, s->nqs_per_uld); kfree(entries); return 0; @@ -5215,8 +5279,11 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* See what interrupts we'll be using */ if (msi > 1 && enable_msix(adapter) == 0) adapter->flags |= USING_MSIX; - else if (msi > 0 && pci_enable_msi(pdev) == 0) + else if (msi > 0 && pci_enable_msi(pdev) == 0) { adapter->flags |= USING_MSI; + if (msi > 1) + free_msix_info(adapter); + } /* check for PCI Express bandwidth capabiltites */ cxgb4_check_pcie_caps(adapter); @@ -5332,6 +5399,10 @@ sriov: out_free_dev: free_some_resources(adapter); + if (adapter->flags & USING_MSIX) + free_msix_info(adapter); + if (adapter->num_uld) + uld_mem_free(adapter); out_unmap_bar: if (!is_t4(adapter->params.chip)) iounmap(adapter->bar2); @@ -5393,6 +5464,10 @@ static void remove_one(struct pci_dev *pdev) if (adapter->flags & FULL_INIT_DONE) cxgb_down(adapter); + if (adapter->flags & USING_MSIX) + free_msix_info(adapter); + if (adapter->num_uld) + uld_mem_free(adapter); free_some_resources(adapter); #if IS_ENABLED(CONFIG_IPV6) t4_cleanup_clip_tbl(adapter); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c new file mode 100644 index 000000000000..aac6e444abf2 --- /dev/null +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c @@ -0,0 +1,555 @@ +/* + * cxgb4_uld.c:Chelsio Upper Layer Driver Interface for T4/T5/T6 SGE management + * + * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Written by: Atul Gupta (atul.gupta@chelsio.com) + * Written by: Hariprasad Shenai (hariprasad@chelsio.com) + */ + +#include <linux/kernel.h> +#include <linux/version.h> +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/types.h> +#include <linux/debugfs.h> +#include <linux/export.h> +#include <linux/list.h> +#include <linux/skbuff.h> +#include <linux/pci.h> + +#include "cxgb4.h" +#include "cxgb4_uld.h" +#include "t4_regs.h" +#include "t4fw_api.h" +#include "t4_msg.h" + +#define for_each_uldrxq(m, i) for (i = 0; i < ((m)->nrxq + (m)->nciq); i++) + +static int get_msix_idx_from_bmap(struct adapter *adap) +{ + struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds; + unsigned long flags; + unsigned int msix_idx; + + spin_lock_irqsave(&bmap->lock, flags); + msix_idx = find_first_zero_bit(bmap->msix_bmap, bmap->mapsize); + if (msix_idx < bmap->mapsize) { + __set_bit(msix_idx, bmap->msix_bmap); + } else { + spin_unlock_irqrestore(&bmap->lock, flags); + return -ENOSPC; + } + + spin_unlock_irqrestore(&bmap->lock, flags); + return msix_idx; +} + +static void free_msix_idx_in_bmap(struct adapter *adap, unsigned int msix_idx) +{ + struct uld_msix_bmap *bmap = &adap->msix_bmap_ulds; + unsigned long flags; + + spin_lock_irqsave(&bmap->lock, flags); + __clear_bit(msix_idx, bmap->msix_bmap); + spin_unlock_irqrestore(&bmap->lock, flags); +} + +static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp, + const struct pkt_gl *gl) +{ + struct adapter *adap = q->adap; + struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq); + int ret; + + /* FW can send CPLs encapsulated in a CPL_FW4_MSG */ + if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG && + ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL) + rsp += 2; + + if (q->flush_handler) + ret = adap->uld[q->uld].lro_rx_handler(adap->uld[q->uld].handle, + rsp, gl, &q->lro_mgr, + &q->napi); + else + ret = adap->uld[q->uld].rx_handler(adap->uld[q->uld].handle, + rsp, gl); + + if (ret) { + rxq->stats.nomem++; + return -1; + } + + if (!gl) + rxq->stats.imm++; + else if (gl == CXGB4_MSG_AN) + rxq->stats.an++; + else + rxq->stats.pkts++; + return 0; +} + +static int alloc_uld_rxqs(struct adapter *adap, + struct sge_uld_rxq_info *rxq_info, + unsigned int nq, unsigned int offset, bool lro) +{ + struct sge *s = &adap->sge; + struct sge_ofld_rxq *q = rxq_info->uldrxq + offset; + unsigned short *ids = rxq_info->rspq_id + offset; + unsigned int per_chan = nq / adap->params.nports; + unsigned int msi_idx, bmap_idx; + int i, err; + + if (adap->flags & USING_MSIX) + msi_idx = 1; + else + msi_idx = -((int)s->intrq.abs_id + 1); + + for (i = 0; i < nq; i++, q++) { + if (msi_idx >= 0) { + bmap_idx = get_msix_idx_from_bmap(adap); + adap->msi_idx++; + } + err = t4_sge_alloc_rxq(adap, &q->rspq, false, + 
+				       adap->port[i / per_chan],
+				       adap->msi_idx,
+				       q->fl.size ? &q->fl : NULL,
+				       uldrx_handler,
+				       NULL,
+				       0);
+		if (err)
+			goto freeout;
+		if (msi_idx >= 0)
+			rxq_info->msix_tbl[i + offset] = bmap_idx;
+		memset(&q->stats, 0, sizeof(q->stats));
+		if (ids)
+			ids[i] = q->rspq.abs_id;
+	}
+	return 0;
+freeout:
+	q = rxq_info->uldrxq + offset;
+	for ( ; i; i--, q++) {
+		if (q->rspq.desc)
+			free_rspq_fl(adap, &q->rspq,
+				     q->fl.size ? &q->fl : NULL);
+		adap->msi_idx--;
+	}
+
+	/* We need to free rxq also in case of ciq allocation failure */
+	if (offset) {
+		q = rxq_info->uldrxq;
+		for (i = offset; i; i--, q++) {
+			if (q->rspq.desc)
+				free_rspq_fl(adap, &q->rspq,
+					     q->fl.size ? &q->fl : NULL);
+			adap->msi_idx--;
+		}
+	}
+	return err;
+}
+
+int setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
+{
+	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
+
+	if (adap->flags & USING_MSIX) {
+		rxq_info->msix_tbl = kcalloc(rxq_info->nrxq + rxq_info->nciq,
+					     sizeof(unsigned short),
+					     GFP_KERNEL);
+		if (!rxq_info->msix_tbl)
+			return -ENOMEM;
+	}
+
+	return !(!alloc_uld_rxqs(adap, rxq_info, rxq_info->nrxq, 0, lro) &&
+		 !alloc_uld_rxqs(adap, rxq_info, rxq_info->nciq,
+				 rxq_info->nrxq, lro));
+}
+
+static void t4_free_uld_rxqs(struct adapter *adap, int n,
+			     struct sge_ofld_rxq *q)
+{
+	for ( ; n; n--, q++) {
+		if (q->rspq.desc)
+			free_rspq_fl(adap, &q->rspq,
+				     q->fl.size ? &q->fl : NULL);
+		adap->msi_idx--;
+	}
+}
+
+void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
+{
+	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
+
+	if (rxq_info->nciq)
+		t4_free_uld_rxqs(adap, rxq_info->nciq,
+				 rxq_info->uldrxq + rxq_info->nrxq);
+	t4_free_uld_rxqs(adap, rxq_info->nrxq, rxq_info->uldrxq);
+	if (adap->flags & USING_MSIX)
+		kfree(rxq_info->msix_tbl);
+}
+
+int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
+		   const struct cxgb4_pci_uld_info *uld_info)
+{
+	struct sge *s = &adap->sge;
+	struct sge_uld_rxq_info *rxq_info;
+	int i, nrxq;
+
+	rxq_info = kzalloc(sizeof(*rxq_info), GFP_KERNEL);
+	if (!rxq_info)
+		return -ENOMEM;
+
+	if (uld_info->nrxq > s->nqs_per_uld)
+		rxq_info->nrxq = s->nqs_per_uld;
+	else
+		rxq_info->nrxq = uld_info->nrxq;
+	if (!uld_info->nciq)
+		rxq_info->nciq = 0;
+	else if (uld_info->nciq > s->nqs_per_uld)
+		rxq_info->nciq = s->nqs_per_uld;
+	else
+		rxq_info->nciq = uld_info->nciq;
+
+	nrxq = rxq_info->nrxq + rxq_info->nciq; /* total rxq's */
+	rxq_info->uldrxq = kcalloc(nrxq, sizeof(struct sge_ofld_rxq),
+				   GFP_KERNEL);
+	if (!rxq_info->uldrxq) {
+		kfree(rxq_info);
+		return -ENOMEM;
+	}
+
+	rxq_info->rspq_id = kcalloc(nrxq, sizeof(unsigned short), GFP_KERNEL);
+	if (!rxq_info->rspq_id) {
+		kfree(rxq_info->uldrxq);
+		kfree(rxq_info);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < rxq_info->nrxq; i++) {
+		struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];
+
+		init_rspq(adap, &r->rspq, 5, 1, uld_info->rxq_size, 64);
+		r->rspq.uld = uld_type;
+		r->fl.size = 72;
+	}
+
+	for (i = rxq_info->nrxq; i < nrxq; i++) {
+		struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];
+
+		init_rspq(adap, &r->rspq, 5, 1, uld_info->ciq_size, 64);
+		r->rspq.uld = uld_type;
+		r->fl.size = 72;
+	}
+
+	memcpy(rxq_info->name, uld_info->name, IFNAMSIZ);
+	adap->sge.uld_rxq_info[uld_type] = rxq_info;
+
+	return 0;
+}
+
+void free_queues_uld(struct adapter *adap, unsigned int uld_type)
+{
+	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
+
+	kfree(rxq_info->rspq_id);
+	kfree(rxq_info->uldrxq);
+	kfree(rxq_info);
+}
+
+int
request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type) +{ + struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; + int idx, bmap_idx, err = 0; + + for_each_uldrxq(rxq_info, idx) { + bmap_idx = rxq_info->msix_tbl[idx]; + err = request_irq(adap->msix_info_ulds[bmap_idx].vec, + t4_sge_intr_msix, 0, + adap->msix_info_ulds[bmap_idx].desc, + &rxq_info->uldrxq[idx].rspq); + if (err) + goto unwind; + } + return 0; +unwind: + while (--idx >= 0) { + bmap_idx = rxq_info->msix_tbl[idx]; + free_msix_idx_in_bmap(adap, bmap_idx); + free_irq(adap->msix_info_ulds[bmap_idx].vec, + &rxq_info->uldrxq[idx].rspq); + } + return err; +} + +void free_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type) +{ + struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; + int idx; + + for_each_uldrxq(rxq_info, idx) { + unsigned int bmap_idx = rxq_info->msix_tbl[idx]; + + free_msix_idx_in_bmap(adap, bmap_idx); + free_irq(adap->msix_info_ulds[bmap_idx].vec, + &rxq_info->uldrxq[idx].rspq); + } +} + +void name_msix_vecs_uld(struct adapter *adap, unsigned int uld_type) +{ + struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; + int n = sizeof(adap->msix_info_ulds[0].desc); + int idx; + + for_each_uldrxq(rxq_info, idx) { + unsigned int bmap_idx = rxq_info->msix_tbl[idx]; + + snprintf(adap->msix_info_ulds[bmap_idx].desc, n, "%s-%s%d", + adap->port[0]->name, rxq_info->name, idx); + } +} + +static void enable_rx(struct adapter *adap, struct sge_rspq *q) +{ + if (!q) + return; + + if (q->handler) { + cxgb_busy_poll_init_lock(q); + napi_enable(&q->napi); + } + /* 0-increment GTS to start the timer and enable interrupts */ + t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A), + SEINTARM_V(q->intr_params) | + INGRESSQID_V(q->cntxt_id)); +} + +static void quiesce_rx(struct adapter *adap, struct sge_rspq *q) +{ + if (q && q->handler) { + napi_disable(&q->napi); + local_bh_disable(); + while (!cxgb_poll_lock_napi(q)) + mdelay(1); + local_bh_enable(); + } +} + +void enable_rx_uld(struct adapter *adap, unsigned int uld_type) +{ + struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; + int idx; + + for_each_uldrxq(rxq_info, idx) + enable_rx(adap, &rxq_info->uldrxq[idx].rspq); +} + +void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type) +{ + struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; + int idx; + + for_each_uldrxq(rxq_info, idx) + quiesce_rx(adap, &rxq_info->uldrxq[idx].rspq); +} + +static void uld_queue_init(struct adapter *adap, unsigned int uld_type, + struct cxgb4_lld_info *lli) +{ + struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type]; + + lli->rxq_ids = rxq_info->rspq_id; + lli->nrxq = rxq_info->nrxq; + lli->ciq_ids = rxq_info->rspq_id + rxq_info->nrxq; + lli->nciq = rxq_info->nciq; +} + +int uld_mem_alloc(struct adapter *adap) +{ + struct sge *s = &adap->sge; + + adap->uld = kcalloc(adap->num_uld, sizeof(*adap->uld), GFP_KERNEL); + if (!adap->uld) + return -ENOMEM; + + s->uld_rxq_info = kzalloc(adap->num_uld * + sizeof(struct sge_uld_rxq_info *), + GFP_KERNEL); + if (!s->uld_rxq_info) + goto err_uld; + + return 0; +err_uld: + kfree(adap->uld); + return -ENOMEM; +} + +void uld_mem_free(struct adapter *adap) +{ + struct sge *s = &adap->sge; + + kfree(s->uld_rxq_info); + kfree(adap->uld); +} + +static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld) +{ + int i; + + lld->pdev = adap->pdev; + lld->pf = adap->pf; + lld->l2t = adap->l2t; + lld->tids = &adap->tids; + lld->ports = adap->port; 
+ lld->vr = &adap->vres; + lld->mtus = adap->params.mtus; + lld->ntxq = adap->sge.iscsiqsets; + lld->nchan = adap->params.nports; + lld->nports = adap->params.nports; + lld->wr_cred = adap->params.ofldq_wr_cred; + lld->adapter_type = adap->params.chip; + lld->cclk_ps = 1000000000 / adap->params.vpd.cclk; + lld->udb_density = 1 << adap->params.sge.eq_qpp; + lld->ucq_density = 1 << adap->params.sge.iq_qpp; + lld->filt_mode = adap->params.tp.vlan_pri_map; + /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */ + for (i = 0; i < NCHAN; i++) + lld->tx_modq[i] = i; + lld->gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A); + lld->db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A); + lld->fw_vers = adap->params.fw_vers; + lld->dbfifo_int_thresh = dbfifo_int_thresh; + lld->sge_ingpadboundary = adap->sge.fl_align; + lld->sge_egrstatuspagesize = adap->sge.stat_len; + lld->sge_pktshift = adap->sge.pktshift; + lld->enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN; + lld->max_ordird_qp = adap->params.max_ordird_qp; + lld->max_ird_adapter = adap->params.max_ird_adapter; + lld->ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl; + lld->nodeid = dev_to_node(adap->pdev_dev); +} + +static void uld_attach(struct adapter *adap, unsigned int uld) +{ + void *handle; + struct cxgb4_lld_info lli; + + uld_init(adap, &lli); + uld_queue_init(adap, uld, &lli); + + handle = adap->uld[uld].add(&lli); + if (IS_ERR(handle)) { + dev_warn(adap->pdev_dev, + "could not attach to the %s driver, error %ld\n", + adap->uld[uld].name, PTR_ERR(handle)); + return; + } + + adap->uld[uld].handle = handle; + + if (adap->flags & FULL_INIT_DONE) + adap->uld[uld].state_change(handle, CXGB4_STATE_UP); +} + +int cxgb4_register_pci_uld(enum cxgb4_pci_uld type, + struct cxgb4_pci_uld_info *p) +{ + int ret = 0; + struct adapter *adap; + + if (type >= CXGB4_PCI_ULD_MAX) + return -EINVAL; + + mutex_lock(&uld_mutex); + list_for_each_entry(adap, &adapter_list, list_node) { + if (!is_pci_uld(adap)) + continue; + ret = cfg_queues_uld(adap, type, p); + if (ret) + goto out; + ret = setup_sge_queues_uld(adap, type, p->lro); + if (ret) + goto free_queues; + if (adap->flags & USING_MSIX) { + name_msix_vecs_uld(adap, type); + ret = request_msix_queue_irqs_uld(adap, type); + if (ret) + goto free_rxq; + } + if (adap->flags & FULL_INIT_DONE) + enable_rx_uld(adap, type); + if (adap->uld[type].add) { + ret = -EBUSY; + goto free_irq; + } + adap->uld[type] = *p; + uld_attach(adap, type); + } + mutex_unlock(&uld_mutex); + return 0; + +free_irq: + if (adap->flags & USING_MSIX) + free_msix_queue_irqs_uld(adap, type); +free_rxq: + free_sge_queues_uld(adap, type); +free_queues: + free_queues_uld(adap, type); +out: + mutex_unlock(&uld_mutex); + return ret; +} +EXPORT_SYMBOL(cxgb4_register_pci_uld); + +int cxgb4_unregister_pci_uld(enum cxgb4_pci_uld type) +{ + struct adapter *adap; + + if (type >= CXGB4_PCI_ULD_MAX) + return -EINVAL; + + mutex_lock(&uld_mutex); + list_for_each_entry(adap, &adapter_list, list_node) { + if (!is_pci_uld(adap)) + continue; + adap->uld[type].handle = NULL; + adap->uld[type].add = NULL; + if (adap->flags & FULL_INIT_DONE) + quiesce_rx_uld(adap, type); + if (adap->flags & USING_MSIX) + free_msix_queue_irqs_uld(adap, type); + free_sge_queues_uld(adap, type); + free_queues_uld(adap, type); + } + mutex_unlock(&uld_mutex); + + return 0; +} +EXPORT_SYMBOL(cxgb4_unregister_pci_uld); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h index f3c58aaa932d..ab4037222f8d 100644 --- 
a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h @@ -32,8 +32,8 @@ * SOFTWARE. */ -#ifndef __CXGB4_OFLD_H -#define __CXGB4_OFLD_H +#ifndef __CXGB4_ULD_H +#define __CXGB4_ULD_H #include <linux/cache.h> #include <linux/spinlock.h> @@ -296,8 +296,36 @@ struct cxgb4_uld_info { void (*lro_flush)(struct t4_lro_mgr *); }; +enum cxgb4_pci_uld { + CXGB4_PCI_ULD1, + CXGB4_PCI_ULD_MAX +}; + +struct cxgb4_pci_uld_info { + const char *name; + bool lro; + void *handle; + unsigned int nrxq; + unsigned int nciq; + unsigned int rxq_size; + unsigned int ciq_size; + void *(*add)(const struct cxgb4_lld_info *p); + int (*rx_handler)(void *handle, const __be64 *rsp, + const struct pkt_gl *gl); + int (*state_change)(void *handle, enum cxgb4_state new_state); + int (*control)(void *handle, enum cxgb4_control control, ...); + int (*lro_rx_handler)(void *handle, const __be64 *rsp, + const struct pkt_gl *gl, + struct t4_lro_mgr *lro_mgr, + struct napi_struct *napi); + void (*lro_flush)(struct t4_lro_mgr *); +}; + int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p); int cxgb4_unregister_uld(enum cxgb4_uld type); +int cxgb4_register_pci_uld(enum cxgb4_pci_uld type, + struct cxgb4_pci_uld_info *p); +int cxgb4_unregister_pci_uld(enum cxgb4_pci_uld type); int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb); unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo); unsigned int cxgb4_port_chan(const struct net_device *dev); @@ -330,4 +358,4 @@ int cxgb4_bar2_sge_qregs(struct net_device *dev, u64 *pbar2_qoffset, unsigned int *pbar2_qid); -#endif /* !__CXGB4_OFLD_H */ +#endif /* !__CXGB4_ULD_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index ad3552df0545..9a607dbc6ca8 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -2928,8 +2928,8 @@ static void free_txq(struct adapter *adap, struct sge_txq *q) q->desc = NULL; } -static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, - struct sge_fl *fl) +void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, + struct sge_fl *fl) { struct sge *s = &adap->sge; unsigned int fl_id = fl ? 
fl->cntxt_id : 0xffff;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index e0ebe1378cb2..fba3b2ad382d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -61,6 +61,7 @@ enum {
 	CPL_ABORT_REQ_RSS     = 0x2B,
 	CPL_ABORT_RPL_RSS     = 0x2D,
 
+	CPL_RX_PHYS_ADDR      = 0x30,
 	CPL_CLOSE_CON_RPL     = 0x32,
 	CPL_ISCSI_HDR         = 0x33,
 	CPL_RDMA_CQE          = 0x35,
@@ -83,6 +84,10 @@ enum {
 	CPL_PASS_OPEN_REQ6    = 0x81,
 	CPL_ACT_OPEN_REQ6     = 0x83,
 
+	CPL_TX_TLS_PDU        = 0x88,
+	CPL_TX_SEC_PDU        = 0x8A,
+	CPL_TX_TLS_ACK        = 0x8B,
+
 	CPL_RDMA_TERMINATE    = 0xA2,
 	CPL_RDMA_WRITE        = 0xA4,
 	CPL_SGE_EGR_UPDATE    = 0xA5,
@@ -94,6 +99,8 @@ enum {
 	CPL_FW4_PLD           = 0xC1,
 	CPL_FW4_ACK           = 0xC3,
 
+	CPL_RX_PHYS_DSGL      = 0xD0,
+
 	CPL_FW6_MSG           = 0xE0,
 	CPL_FW6_PLD           = 0xE1,
 	CPL_TX_PKT_LSO        = 0xED,
@@ -1362,6 +1369,15 @@ struct ulptx_idata {
 	__be32 len;
 };
 
+struct ulp_txpkt {
+	__be32 cmd_dest;
+	__be32 len;
+};
+
+#define ULPTX_CMD_S    24
+#define ULPTX_CMD_M    0xFF
+#define ULPTX_CMD_V(x) ((x) << ULPTX_CMD_S)
+
 #define ULPTX_NSGE_S    0
 #define ULPTX_NSGE_V(x) ((x) << ULPTX_NSGE_S)
 
@@ -1369,6 +1385,22 @@ struct ulptx_idata {
 #define ULPTX_MORE_V(x)	((x) << ULPTX_MORE_S)
 #define ULPTX_MORE_F	ULPTX_MORE_V(1U)
 
+#define ULP_TXPKT_DEST_S    16
+#define ULP_TXPKT_DEST_M    0x3
+#define ULP_TXPKT_DEST_V(x) ((x) << ULP_TXPKT_DEST_S)
+
+#define ULP_TXPKT_FID_S     4
+#define ULP_TXPKT_FID_M     0x7ff
+#define ULP_TXPKT_FID_V(x)  ((x) << ULP_TXPKT_FID_S)
+
+#define ULP_TXPKT_RO_S    3
+#define ULP_TXPKT_RO_V(x) ((x) << ULP_TXPKT_RO_S)
+#define ULP_TXPKT_RO_F    ULP_TXPKT_RO_V(1U)
+
+#define ULP_TX_SC_MORE_S    23
+#define ULP_TX_SC_MORE_V(x) ((x) << ULP_TX_SC_MORE_S)
+#define ULP_TX_SC_MORE_F    ULP_TX_SC_MORE_V(1U)
+
 struct ulp_mem_io {
 	WR_HDR;
 	__be32 cmd;
@@ -1406,4 +1438,409 @@ struct ulp_mem_io {
 #define ULP_MEMIO_DATA_LEN_S    0
 #define ULP_MEMIO_DATA_LEN_V(x) ((x) << ULP_MEMIO_DATA_LEN_S)
 
+#define ULPTX_NSGE_S 0
+#define ULPTX_NSGE_M 0xFFFF
+#define ULPTX_NSGE_V(x) ((x) << ULPTX_NSGE_S)
+#define ULPTX_NSGE_G(x) (((x) >> ULPTX_NSGE_S) & ULPTX_NSGE_M)
+
+struct ulptx_sc_memrd {
+	__be32 cmd_to_len;
+	__be32 addr;
+};
+
+#define ULP_TXPKT_DATAMODIFY_S       23
+#define ULP_TXPKT_DATAMODIFY_M       0x1
+#define ULP_TXPKT_DATAMODIFY_V(x)    ((x) << ULP_TXPKT_DATAMODIFY_S)
+#define ULP_TXPKT_DATAMODIFY_G(x)    \
+	(((x) >> ULP_TXPKT_DATAMODIFY_S) & ULP_TXPKT_DATAMODIFY_M)
+#define ULP_TXPKT_DATAMODIFY_F       ULP_TXPKT_DATAMODIFY_V(1U)
+
+#define ULP_TXPKT_CHANNELID_S        22
+#define ULP_TXPKT_CHANNELID_M        0x1
+#define ULP_TXPKT_CHANNELID_V(x)     ((x) << ULP_TXPKT_CHANNELID_S)
+#define ULP_TXPKT_CHANNELID_G(x)     \
+	(((x) >> ULP_TXPKT_CHANNELID_S) & ULP_TXPKT_CHANNELID_M)
+#define ULP_TXPKT_CHANNELID_F        ULP_TXPKT_CHANNELID_V(1U)
+
+#define SCMD_SEQ_NO_CTRL_S      29
+#define SCMD_SEQ_NO_CTRL_M      0x3
+#define SCMD_SEQ_NO_CTRL_V(x)   ((x) << SCMD_SEQ_NO_CTRL_S)
+#define SCMD_SEQ_NO_CTRL_G(x)   \
+	(((x) >> SCMD_SEQ_NO_CTRL_S) & SCMD_SEQ_NO_CTRL_M)
+
+/* StsFieldPrsnt - Status field at the end of the TLS PDU */
+#define SCMD_STATUS_PRESENT_S       28
+#define SCMD_STATUS_PRESENT_M       0x1
+#define SCMD_STATUS_PRESENT_V(x)    ((x) << SCMD_STATUS_PRESENT_S)
+#define SCMD_STATUS_PRESENT_G(x)    \
+	(((x) >> SCMD_STATUS_PRESENT_S) & SCMD_STATUS_PRESENT_M)
+#define SCMD_STATUS_PRESENT_F       SCMD_STATUS_PRESENT_V(1U)
+
+/* ProtoVersion - Protocol Version 0: 1.2, 1: 1.1, 2: DTLS, 3: Generic,
+ * 4-15: Reserved.
+ */
+#define SCMD_PROTO_VERSION_S    24
+#define SCMD_PROTO_VERSION_M    0xf
+#define SCMD_PROTO_VERSION_V(x) ((x) << SCMD_PROTO_VERSION_S)
+#define SCMD_PROTO_VERSION_G(x) \
+	(((x) >> SCMD_PROTO_VERSION_S) & SCMD_PROTO_VERSION_M)
+
+/* EncDecCtrl - Encryption/Decryption Control. 0: Encrypt, 1: Decrypt */
+#define SCMD_ENC_DEC_CTRL_S     23
+#define SCMD_ENC_DEC_CTRL_M     0x1
+#define SCMD_ENC_DEC_CTRL_V(x)  ((x) << SCMD_ENC_DEC_CTRL_S)
+#define SCMD_ENC_DEC_CTRL_G(x)  \
+	(((x) >> SCMD_ENC_DEC_CTRL_S) & SCMD_ENC_DEC_CTRL_M)
+#define SCMD_ENC_DEC_CTRL_F     SCMD_ENC_DEC_CTRL_V(1U)
+
+/* CipherAuthSeqCtrl - Cipher Authentication Sequence Control. */
+#define SCMD_CIPH_AUTH_SEQ_CTRL_S       22
+#define SCMD_CIPH_AUTH_SEQ_CTRL_M       0x1
+#define SCMD_CIPH_AUTH_SEQ_CTRL_V(x)    \
+	((x) << SCMD_CIPH_AUTH_SEQ_CTRL_S)
+#define SCMD_CIPH_AUTH_SEQ_CTRL_G(x)    \
+	(((x) >> SCMD_CIPH_AUTH_SEQ_CTRL_S) & SCMD_CIPH_AUTH_SEQ_CTRL_M)
+#define SCMD_CIPH_AUTH_SEQ_CTRL_F       SCMD_CIPH_AUTH_SEQ_CTRL_V(1U)
+
+/* CiphMode - Cipher Mode. 0: NOP, 1: AES-CBC, 2: AES-GCM, 3: AES-CTR,
+ * 4: Generic-AES, 5-15: Reserved.
+ */
+#define SCMD_CIPH_MODE_S    18
+#define SCMD_CIPH_MODE_M    0xf
+#define SCMD_CIPH_MODE_V(x) ((x) << SCMD_CIPH_MODE_S)
+#define SCMD_CIPH_MODE_G(x) \
+	(((x) >> SCMD_CIPH_MODE_S) & SCMD_CIPH_MODE_M)
+
+/* AuthMode - Auth Mode. 0: NOP, 1: SHA1, 2: SHA2-224, 3: SHA2-256,
+ * 4-15: Reserved
+ */
+#define SCMD_AUTH_MODE_S    14
+#define SCMD_AUTH_MODE_M    0xf
+#define SCMD_AUTH_MODE_V(x) ((x) << SCMD_AUTH_MODE_S)
+#define SCMD_AUTH_MODE_G(x) \
+	(((x) >> SCMD_AUTH_MODE_S) & SCMD_AUTH_MODE_M)
+
+/* HmacCtrl - HMAC Control. 0: NOP, 1: No truncation, 2: Support HMAC
+ * Truncation per RFC 4366, 3: IPSec 96 bits, 4-7: Reserved
+ */
+#define SCMD_HMAC_CTRL_S    11
+#define SCMD_HMAC_CTRL_M    0x7
+#define SCMD_HMAC_CTRL_V(x) ((x) << SCMD_HMAC_CTRL_S)
+#define SCMD_HMAC_CTRL_G(x) \
+	(((x) >> SCMD_HMAC_CTRL_S) & SCMD_HMAC_CTRL_M)
+
+/* IvSize - IV size in units of 2 bytes */
+#define SCMD_IV_SIZE_S      7
+#define SCMD_IV_SIZE_M      0xf
+#define SCMD_IV_SIZE_V(x)   ((x) << SCMD_IV_SIZE_S)
+#define SCMD_IV_SIZE_G(x)   \
+	(((x) >> SCMD_IV_SIZE_S) & SCMD_IV_SIZE_M)
+
+/* NumIVs - Number of IVs */
+#define SCMD_NUM_IVS_S      0
+#define SCMD_NUM_IVS_M      0x7f
+#define SCMD_NUM_IVS_V(x)   ((x) << SCMD_NUM_IVS_S)
+#define SCMD_NUM_IVS_G(x)   \
+	(((x) >> SCMD_NUM_IVS_S) & SCMD_NUM_IVS_M)
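
For orientation, a minimal sketch (not part of the patch) of how a sender would compose the seqno_numivs word of a CPL_TX_SEC_PDU from the fields above; the cipher/auth values are illustrative, taken directly from the comments:

	/* Illustrative only: AES-CBC cipher with SHA1 HMAC, truncation
	 * per RFC 4366; the real values depend on the negotiated suite.
	 */
	u32 seqno_numivs = SCMD_SEQ_NO_CTRL_V(0) |
			   SCMD_PROTO_VERSION_V(3) |	/* 3: Generic */
			   SCMD_ENC_DEC_CTRL_V(0) |	/* 0: Encrypt */
			   SCMD_CIPH_AUTH_SEQ_CTRL_V(1) |
			   SCMD_CIPH_MODE_V(1) |	/* 1: AES-CBC */
			   SCMD_AUTH_MODE_V(1) |	/* 1: SHA1 */
			   SCMD_HMAC_CTRL_V(2) |	/* truncation per RFC 4366 */
			   SCMD_IV_SIZE_V(8) |		/* 16-byte IV, 2-byte units */
			   SCMD_NUM_IVS_V(0);
	/* the CPL carries the word big-endian, e.g. htonl(seqno_numivs) */
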
+/* EnbDbgId - If this is enabled upper 20 (63:44) bits of SeqNumber
+ * (below) are used as Cid (connection id for debug status), these
+ * bits are padded to zero for forming the 64 bit
+ * sequence number for TLS
+ */
+#define SCMD_ENB_DBGID_S    31
+#define SCMD_ENB_DBGID_M    0x1
+#define SCMD_ENB_DBGID_V(x) ((x) << SCMD_ENB_DBGID_S)
+#define SCMD_ENB_DBGID_G(x) \
+	(((x) >> SCMD_ENB_DBGID_S) & SCMD_ENB_DBGID_M)
+
+/* IV generation in SW. */
+#define SCMD_IV_GEN_CTRL_S      30
+#define SCMD_IV_GEN_CTRL_M      0x1
+#define SCMD_IV_GEN_CTRL_V(x)   ((x) << SCMD_IV_GEN_CTRL_S)
+#define SCMD_IV_GEN_CTRL_G(x)   \
+	(((x) >> SCMD_IV_GEN_CTRL_S) & SCMD_IV_GEN_CTRL_M)
+#define SCMD_IV_GEN_CTRL_F      SCMD_IV_GEN_CTRL_V(1U)
+
+/* More frags */
+#define SCMD_MORE_FRAGS_S    20
+#define SCMD_MORE_FRAGS_M    0x1
+#define SCMD_MORE_FRAGS_V(x) ((x) << SCMD_MORE_FRAGS_S)
+#define SCMD_MORE_FRAGS_G(x) (((x) >> SCMD_MORE_FRAGS_S) & SCMD_MORE_FRAGS_M)
+
+/* Last frag */
+#define SCMD_LAST_FRAG_S    19
+#define SCMD_LAST_FRAG_M    0x1
+#define SCMD_LAST_FRAG_V(x) ((x) << SCMD_LAST_FRAG_S)
+#define SCMD_LAST_FRAG_G(x) (((x) >> SCMD_LAST_FRAG_S) & SCMD_LAST_FRAG_M)
+
+/* TlsCompPdu */
+#define SCMD_TLS_COMPPDU_S    18
+#define SCMD_TLS_COMPPDU_M    0x1
+#define SCMD_TLS_COMPPDU_V(x) ((x) << SCMD_TLS_COMPPDU_S)
+#define SCMD_TLS_COMPPDU_G(x) (((x) >> SCMD_TLS_COMPPDU_S) & SCMD_TLS_COMPPDU_M)
+
+/* KeyCntxtInline - Key context inline after the scmd OR PayloadOnly */
+#define SCMD_KEY_CTX_INLINE_S    17
+#define SCMD_KEY_CTX_INLINE_M    0x1
+#define SCMD_KEY_CTX_INLINE_V(x) ((x) << SCMD_KEY_CTX_INLINE_S)
+#define SCMD_KEY_CTX_INLINE_G(x) \
+	(((x) >> SCMD_KEY_CTX_INLINE_S) & SCMD_KEY_CTX_INLINE_M)
+#define SCMD_KEY_CTX_INLINE_F    SCMD_KEY_CTX_INLINE_V(1U)
+
+/* TLSFragEnable - 0: Host created TLS PDUs, 1: TLS Fragmentation in ASIC */
+#define SCMD_TLS_FRAG_ENABLE_S    16
+#define SCMD_TLS_FRAG_ENABLE_M    0x1
+#define SCMD_TLS_FRAG_ENABLE_V(x) ((x) << SCMD_TLS_FRAG_ENABLE_S)
+#define SCMD_TLS_FRAG_ENABLE_G(x) \
+	(((x) >> SCMD_TLS_FRAG_ENABLE_S) & SCMD_TLS_FRAG_ENABLE_M)
+#define SCMD_TLS_FRAG_ENABLE_F    SCMD_TLS_FRAG_ENABLE_V(1U)
+
+/* MacOnly - Only send the MAC and discard PDU. This is valid for hash only
+ * modes, in this case TLS_TX will drop the PDU and only
+ * send back the MAC bytes.
+ */
+#define SCMD_MAC_ONLY_S    15
+#define SCMD_MAC_ONLY_M    0x1
+#define SCMD_MAC_ONLY_V(x) ((x) << SCMD_MAC_ONLY_S)
+#define SCMD_MAC_ONLY_G(x) \
+	(((x) >> SCMD_MAC_ONLY_S) & SCMD_MAC_ONLY_M)
+#define SCMD_MAC_ONLY_F    SCMD_MAC_ONLY_V(1U)
+
+/* AadIVDrop - Drop the AAD and IV fields. Useful in protocols
+ * which have complex AAD and IV formations, e.g. AES-CCM
+ */
+#define SCMD_AADIVDROP_S    14
+#define SCMD_AADIVDROP_M    0x1
+#define SCMD_AADIVDROP_V(x) ((x) << SCMD_AADIVDROP_S)
+#define SCMD_AADIVDROP_G(x) \
+	(((x) >> SCMD_AADIVDROP_S) & SCMD_AADIVDROP_M)
+#define SCMD_AADIVDROP_F    SCMD_AADIVDROP_V(1U)
+
+/* HdrLength - Length of all headers excluding TLS header
+ * present before start of crypto PDU/payload.
+ */ +#define SCMD_HDR_LEN_S 0 +#define SCMD_HDR_LEN_M 0x3fff +#define SCMD_HDR_LEN_V(x) ((x) << SCMD_HDR_LEN_S) +#define SCMD_HDR_LEN_G(x) \ + (((x) >> SCMD_HDR_LEN_S) & SCMD_HDR_LEN_M) + +struct cpl_tx_sec_pdu { + __be32 op_ivinsrtofst; + __be32 pldlen; + __be32 aadstart_cipherstop_hi; + __be32 cipherstop_lo_authinsert; + __be32 seqno_numivs; + __be32 ivgen_hdrlen; + __be64 scmd1; +}; + +#define CPL_TX_SEC_PDU_OPCODE_S 24 +#define CPL_TX_SEC_PDU_OPCODE_M 0xff +#define CPL_TX_SEC_PDU_OPCODE_V(x) ((x) << CPL_TX_SEC_PDU_OPCODE_S) +#define CPL_TX_SEC_PDU_OPCODE_G(x) \ + (((x) >> CPL_TX_SEC_PDU_OPCODE_S) & CPL_TX_SEC_PDU_OPCODE_M) + +/* RX Channel Id */ +#define CPL_TX_SEC_PDU_RXCHID_S 22 +#define CPL_TX_SEC_PDU_RXCHID_M 0x1 +#define CPL_TX_SEC_PDU_RXCHID_V(x) ((x) << CPL_TX_SEC_PDU_RXCHID_S) +#define CPL_TX_SEC_PDU_RXCHID_G(x) \ + (((x) >> CPL_TX_SEC_PDU_RXCHID_S) & CPL_TX_SEC_PDU_RXCHID_M) +#define CPL_TX_SEC_PDU_RXCHID_F CPL_TX_SEC_PDU_RXCHID_V(1U) + +/* Ack Follows */ +#define CPL_TX_SEC_PDU_ACKFOLLOWS_S 21 +#define CPL_TX_SEC_PDU_ACKFOLLOWS_M 0x1 +#define CPL_TX_SEC_PDU_ACKFOLLOWS_V(x) ((x) << CPL_TX_SEC_PDU_ACKFOLLOWS_S) +#define CPL_TX_SEC_PDU_ACKFOLLOWS_G(x) \ + (((x) >> CPL_TX_SEC_PDU_ACKFOLLOWS_S) & CPL_TX_SEC_PDU_ACKFOLLOWS_M) +#define CPL_TX_SEC_PDU_ACKFOLLOWS_F CPL_TX_SEC_PDU_ACKFOLLOWS_V(1U) + +/* Loopback bit in cpl_tx_sec_pdu */ +#define CPL_TX_SEC_PDU_ULPTXLPBK_S 20 +#define CPL_TX_SEC_PDU_ULPTXLPBK_M 0x1 +#define CPL_TX_SEC_PDU_ULPTXLPBK_V(x) ((x) << CPL_TX_SEC_PDU_ULPTXLPBK_S) +#define CPL_TX_SEC_PDU_ULPTXLPBK_G(x) \ + (((x) >> CPL_TX_SEC_PDU_ULPTXLPBK_S) & CPL_TX_SEC_PDU_ULPTXLPBK_M) +#define CPL_TX_SEC_PDU_ULPTXLPBK_F CPL_TX_SEC_PDU_ULPTXLPBK_V(1U) + +/* Length of cpl header encapsulated */ +#define CPL_TX_SEC_PDU_CPLLEN_S 16 +#define CPL_TX_SEC_PDU_CPLLEN_M 0xf +#define CPL_TX_SEC_PDU_CPLLEN_V(x) ((x) << CPL_TX_SEC_PDU_CPLLEN_S) +#define CPL_TX_SEC_PDU_CPLLEN_G(x) \ + (((x) >> CPL_TX_SEC_PDU_CPLLEN_S) & CPL_TX_SEC_PDU_CPLLEN_M) + +/* PlaceHolder */ +#define CPL_TX_SEC_PDU_PLACEHOLDER_S 10 +#define CPL_TX_SEC_PDU_PLACEHOLDER_M 0x1 +#define CPL_TX_SEC_PDU_PLACEHOLDER_V(x) ((x) << CPL_TX_SEC_PDU_PLACEHOLDER_S) +#define CPL_TX_SEC_PDU_PLACEHOLDER_G(x) \ + (((x) >> CPL_TX_SEC_PDU_PLACEHOLDER_S) & \ + CPL_TX_SEC_PDU_PLACEHOLDER_M) + +/* IvInsrtOffset: Insertion location for IV */ +#define CPL_TX_SEC_PDU_IVINSRTOFST_S 0 +#define CPL_TX_SEC_PDU_IVINSRTOFST_M 0x3ff +#define CPL_TX_SEC_PDU_IVINSRTOFST_V(x) ((x) << CPL_TX_SEC_PDU_IVINSRTOFST_S) +#define CPL_TX_SEC_PDU_IVINSRTOFST_G(x) \ + (((x) >> CPL_TX_SEC_PDU_IVINSRTOFST_S) & \ + CPL_TX_SEC_PDU_IVINSRTOFST_M) + +/* AadStartOffset: Offset in bytes for AAD start from + * the first byte following the pkt headers (0-255 bytes) + */ +#define CPL_TX_SEC_PDU_AADSTART_S 24 +#define CPL_TX_SEC_PDU_AADSTART_M 0xff +#define CPL_TX_SEC_PDU_AADSTART_V(x) ((x) << CPL_TX_SEC_PDU_AADSTART_S) +#define CPL_TX_SEC_PDU_AADSTART_G(x) \ + (((x) >> CPL_TX_SEC_PDU_AADSTART_S) & \ + CPL_TX_SEC_PDU_AADSTART_M) + +/* AadStopOffset: offset in bytes for AAD stop/end from the first byte following + * the pkt headers (0-511 bytes) + */ +#define CPL_TX_SEC_PDU_AADSTOP_S 15 +#define CPL_TX_SEC_PDU_AADSTOP_M 0x1ff +#define CPL_TX_SEC_PDU_AADSTOP_V(x) ((x) << CPL_TX_SEC_PDU_AADSTOP_S) +#define CPL_TX_SEC_PDU_AADSTOP_G(x) \ + (((x) >> CPL_TX_SEC_PDU_AADSTOP_S) & CPL_TX_SEC_PDU_AADSTOP_M) + +/* CipherStartOffset: offset in bytes for encryption/decryption start from the + * first byte following the pkt headers (0-1023 bytes) + */ +#define 
CPL_TX_SEC_PDU_CIPHERSTART_S 5 +#define CPL_TX_SEC_PDU_CIPHERSTART_M 0x3ff +#define CPL_TX_SEC_PDU_CIPHERSTART_V(x) ((x) << CPL_TX_SEC_PDU_CIPHERSTART_S) +#define CPL_TX_SEC_PDU_CIPHERSTART_G(x) \ + (((x) >> CPL_TX_SEC_PDU_CIPHERSTART_S) & \ + CPL_TX_SEC_PDU_CIPHERSTART_M) + +/* CipherStopOffset: offset in bytes for encryption/decryption end + * from end of the payload of this command (0-511 bytes) + */ +#define CPL_TX_SEC_PDU_CIPHERSTOP_HI_S 0 +#define CPL_TX_SEC_PDU_CIPHERSTOP_HI_M 0x1f +#define CPL_TX_SEC_PDU_CIPHERSTOP_HI_V(x) \ + ((x) << CPL_TX_SEC_PDU_CIPHERSTOP_HI_S) +#define CPL_TX_SEC_PDU_CIPHERSTOP_HI_G(x) \ + (((x) >> CPL_TX_SEC_PDU_CIPHERSTOP_HI_S) & \ + CPL_TX_SEC_PDU_CIPHERSTOP_HI_M) + +#define CPL_TX_SEC_PDU_CIPHERSTOP_LO_S 28 +#define CPL_TX_SEC_PDU_CIPHERSTOP_LO_M 0xf +#define CPL_TX_SEC_PDU_CIPHERSTOP_LO_V(x) \ + ((x) << CPL_TX_SEC_PDU_CIPHERSTOP_LO_S) +#define CPL_TX_SEC_PDU_CIPHERSTOP_LO_G(x) \ + (((x) >> CPL_TX_SEC_PDU_CIPHERSTOP_LO_S) & \ + CPL_TX_SEC_PDU_CIPHERSTOP_LO_M) + +/* AuthStartOffset: offset in bytes for authentication start from + * the first byte following the pkt headers (0-1023) + */ +#define CPL_TX_SEC_PDU_AUTHSTART_S 18 +#define CPL_TX_SEC_PDU_AUTHSTART_M 0x3ff +#define CPL_TX_SEC_PDU_AUTHSTART_V(x) ((x) << CPL_TX_SEC_PDU_AUTHSTART_S) +#define CPL_TX_SEC_PDU_AUTHSTART_G(x) \ + (((x) >> CPL_TX_SEC_PDU_AUTHSTART_S) & \ + CPL_TX_SEC_PDU_AUTHSTART_M) + +/* AuthStopOffset: offset in bytes for authentication + * end from end of the payload of this command (0-511 Bytes) + */ +#define CPL_TX_SEC_PDU_AUTHSTOP_S 9 +#define CPL_TX_SEC_PDU_AUTHSTOP_M 0x1ff +#define CPL_TX_SEC_PDU_AUTHSTOP_V(x) ((x) << CPL_TX_SEC_PDU_AUTHSTOP_S) +#define CPL_TX_SEC_PDU_AUTHSTOP_G(x) \ + (((x) >> CPL_TX_SEC_PDU_AUTHSTOP_S) & \ + CPL_TX_SEC_PDU_AUTHSTOP_M) + +/* AuthInsrtOffset: offset in bytes for authentication insertion + * from end of the payload of this command (0-511 bytes) + */ +#define CPL_TX_SEC_PDU_AUTHINSERT_S 0 +#define CPL_TX_SEC_PDU_AUTHINSERT_M 0x1ff +#define CPL_TX_SEC_PDU_AUTHINSERT_V(x) ((x) << CPL_TX_SEC_PDU_AUTHINSERT_S) +#define CPL_TX_SEC_PDU_AUTHINSERT_G(x) \ + (((x) >> CPL_TX_SEC_PDU_AUTHINSERT_S) & \ + CPL_TX_SEC_PDU_AUTHINSERT_M) + +struct cpl_rx_phys_dsgl { + __be32 op_to_tid; + __be32 pcirlxorder_to_noofsgentr; + struct rss_header rss_hdr_int; +}; + +#define CPL_RX_PHYS_DSGL_OPCODE_S 24 +#define CPL_RX_PHYS_DSGL_OPCODE_M 0xff +#define CPL_RX_PHYS_DSGL_OPCODE_V(x) ((x) << CPL_RX_PHYS_DSGL_OPCODE_S) +#define CPL_RX_PHYS_DSGL_OPCODE_G(x) \ + (((x) >> CPL_RX_PHYS_DSGL_OPCODE_S) & CPL_RX_PHYS_DSGL_OPCODE_M) + +#define CPL_RX_PHYS_DSGL_ISRDMA_S 23 +#define CPL_RX_PHYS_DSGL_ISRDMA_M 0x1 +#define CPL_RX_PHYS_DSGL_ISRDMA_V(x) ((x) << CPL_RX_PHYS_DSGL_ISRDMA_S) +#define CPL_RX_PHYS_DSGL_ISRDMA_G(x) \ + (((x) >> CPL_RX_PHYS_DSGL_ISRDMA_S) & CPL_RX_PHYS_DSGL_ISRDMA_M) +#define CPL_RX_PHYS_DSGL_ISRDMA_F CPL_RX_PHYS_DSGL_ISRDMA_V(1U) + +#define CPL_RX_PHYS_DSGL_RSVD1_S 20 +#define CPL_RX_PHYS_DSGL_RSVD1_M 0x7 +#define CPL_RX_PHYS_DSGL_RSVD1_V(x) ((x) << CPL_RX_PHYS_DSGL_RSVD1_S) +#define CPL_RX_PHYS_DSGL_RSVD1_G(x) \ + (((x) >> CPL_RX_PHYS_DSGL_RSVD1_S) & \ + CPL_RX_PHYS_DSGL_RSVD1_M) + +#define CPL_RX_PHYS_DSGL_PCIRLXORDER_S 31 +#define CPL_RX_PHYS_DSGL_PCIRLXORDER_M 0x1 +#define CPL_RX_PHYS_DSGL_PCIRLXORDER_V(x) \ + ((x) << CPL_RX_PHYS_DSGL_PCIRLXORDER_S) +#define CPL_RX_PHYS_DSGL_PCIRLXORDER_G(x) \ + (((x) >> CPL_RX_PHYS_DSGL_PCIRLXORDER_S) & \ + CPL_RX_PHYS_DSGL_PCIRLXORDER_M) +#define CPL_RX_PHYS_DSGL_PCIRLXORDER_F CPL_RX_PHYS_DSGL_PCIRLXORDER_V(1U) + +#define 
CPL_RX_PHYS_DSGL_PCINOSNOOP_S 30 +#define CPL_RX_PHYS_DSGL_PCINOSNOOP_M 0x1 +#define CPL_RX_PHYS_DSGL_PCINOSNOOP_V(x) \ + ((x) << CPL_RX_PHYS_DSGL_PCINOSNOOP_S) +#define CPL_RX_PHYS_DSGL_PCINOSNOOP_G(x) \ + (((x) >> CPL_RX_PHYS_DSGL_PCINOSNOOP_S) & \ + CPL_RX_PHYS_DSGL_PCINOSNOOP_M) + +#define CPL_RX_PHYS_DSGL_PCINOSNOOP_F CPL_RX_PHYS_DSGL_PCINOSNOOP_V(1U) + +#define CPL_RX_PHYS_DSGL_PCITPHNTENB_S 29 +#define CPL_RX_PHYS_DSGL_PCITPHNTENB_M 0x1 +#define CPL_RX_PHYS_DSGL_PCITPHNTENB_V(x) \ + ((x) << CPL_RX_PHYS_DSGL_PCITPHNTENB_S) +#define CPL_RX_PHYS_DSGL_PCITPHNTENB_G(x) \ + (((x) >> CPL_RX_PHYS_DSGL_PCITPHNTENB_S) & \ + CPL_RX_PHYS_DSGL_PCITPHNTENB_M) +#define CPL_RX_PHYS_DSGL_PCITPHNTENB_F CPL_RX_PHYS_DSGL_PCITPHNTENB_V(1U) + +#define CPL_RX_PHYS_DSGL_PCITPHNT_S 27 +#define CPL_RX_PHYS_DSGL_PCITPHNT_M 0x3 +#define CPL_RX_PHYS_DSGL_PCITPHNT_V(x) ((x) << CPL_RX_PHYS_DSGL_PCITPHNT_S) +#define CPL_RX_PHYS_DSGL_PCITPHNT_G(x) \ + (((x) >> CPL_RX_PHYS_DSGL_PCITPHNT_S) & \ + CPL_RX_PHYS_DSGL_PCITPHNT_M) + +#define CPL_RX_PHYS_DSGL_DCAID_S 16 +#define CPL_RX_PHYS_DSGL_DCAID_M 0x7ff +#define CPL_RX_PHYS_DSGL_DCAID_V(x) ((x) << CPL_RX_PHYS_DSGL_DCAID_S) +#define CPL_RX_PHYS_DSGL_DCAID_G(x) \ + (((x) >> CPL_RX_PHYS_DSGL_DCAID_S) & \ + CPL_RX_PHYS_DSGL_DCAID_M) + +#define CPL_RX_PHYS_DSGL_NOOFSGENTR_S 0 +#define CPL_RX_PHYS_DSGL_NOOFSGENTR_M 0xffff +#define CPL_RX_PHYS_DSGL_NOOFSGENTR_V(x) \ + ((x) << CPL_RX_PHYS_DSGL_NOOFSGENTR_S) +#define CPL_RX_PHYS_DSGL_NOOFSGENTR_G(x) \ + (((x) >> CPL_RX_PHYS_DSGL_NOOFSGENTR_S) & \ + CPL_RX_PHYS_DSGL_NOOFSGENTR_M) + #endif /* __T4_MSG_H */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index a89b30720e38..d8f4adb9c7a6 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -102,6 +102,7 @@ enum fw_wr_opcodes { FW_RI_FR_NSMR_WR = 0x19, FW_RI_INV_LSTAG_WR = 0x1a, FW_ISCSI_TX_DATA_WR = 0x45, + FW_CRYPTO_LOOKASIDE_WR = 0X6d, FW_LASTC2E_WR = 0x70 }; @@ -1060,7 +1061,7 @@ struct fw_caps_config_cmd { __be16 niccaps; __be16 ofldcaps; __be16 rdmacaps; - __be16 r4; + __be16 cryptocaps; __be16 iscsicaps; __be16 fcoecaps; __be32 cfcsum; @@ -3249,4 +3250,127 @@ struct fw_devlog_cmd { #define PCIE_FW_PF_DEVLOG_MEMTYPE_G(x) \ (((x) >> PCIE_FW_PF_DEVLOG_MEMTYPE_S) & PCIE_FW_PF_DEVLOG_MEMTYPE_M) +#define MAX_IMM_OFLD_TX_DATA_WR_LEN (0xff + sizeof(struct fw_ofld_tx_data_wr)) + +struct fw_crypto_lookaside_wr { + __be32 op_to_cctx_size; + __be32 len16_pkd; + __be32 session_id; + __be32 rx_chid_to_rx_q_id; + __be32 key_addr; + __be32 pld_size_hash_size; + __be64 cookie; +}; + +#define FW_CRYPTO_LOOKASIDE_WR_OPCODE_S 24 +#define FW_CRYPTO_LOOKASIDE_WR_OPCODE_M 0xff +#define FW_CRYPTO_LOOKASIDE_WR_OPCODE_V(x) \ + ((x) << FW_CRYPTO_LOOKASIDE_WR_OPCODE_S) +#define FW_CRYPTO_LOOKASIDE_WR_OPCODE_G(x) \ + (((x) >> FW_CRYPTO_LOOKASIDE_WR_OPCODE_S) & \ + FW_CRYPTO_LOOKASIDE_WR_OPCODE_M) + +#define FW_CRYPTO_LOOKASIDE_WR_COMPL_S 23 +#define FW_CRYPTO_LOOKASIDE_WR_COMPL_M 0x1 +#define FW_CRYPTO_LOOKASIDE_WR_COMPL_V(x) \ + ((x) << FW_CRYPTO_LOOKASIDE_WR_COMPL_S) +#define FW_CRYPTO_LOOKASIDE_WR_COMPL_G(x) \ + (((x) >> FW_CRYPTO_LOOKASIDE_WR_COMPL_S) & \ + FW_CRYPTO_LOOKASIDE_WR_COMPL_M) +#define FW_CRYPTO_LOOKASIDE_WR_COMPL_F FW_CRYPTO_LOOKASIDE_WR_COMPL_V(1U) + +#define FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_S 15 +#define FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_M 0xff +#define FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_V(x) \ + ((x) << FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_S) +#define 
FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_G(x) \ + (((x) >> FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_S) & \ + FW_CRYPTO_LOOKASIDE_WR_IMM_LEN_M) + +#define FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_S 5 +#define FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_M 0x3 +#define FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_V(x) \ + ((x) << FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_S) +#define FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_G(x) \ + (((x) >> FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_S) & \ + FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC_M) + +#define FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_S 0 +#define FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_M 0x1f +#define FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_V(x) \ + ((x) << FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_S) +#define FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_G(x) \ + (((x) >> FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_S) & \ + FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE_M) + +#define FW_CRYPTO_LOOKASIDE_WR_LEN16_S 0 +#define FW_CRYPTO_LOOKASIDE_WR_LEN16_M 0xff +#define FW_CRYPTO_LOOKASIDE_WR_LEN16_V(x) \ + ((x) << FW_CRYPTO_LOOKASIDE_WR_LEN16_S) +#define FW_CRYPTO_LOOKASIDE_WR_LEN16_G(x) \ + (((x) >> FW_CRYPTO_LOOKASIDE_WR_LEN16_S) & \ + FW_CRYPTO_LOOKASIDE_WR_LEN16_M) + +#define FW_CRYPTO_LOOKASIDE_WR_RX_CHID_S 29 +#define FW_CRYPTO_LOOKASIDE_WR_RX_CHID_M 0x3 +#define FW_CRYPTO_LOOKASIDE_WR_RX_CHID_V(x) \ + ((x) << FW_CRYPTO_LOOKASIDE_WR_RX_CHID_S) +#define FW_CRYPTO_LOOKASIDE_WR_RX_CHID_G(x) \ + (((x) >> FW_CRYPTO_LOOKASIDE_WR_RX_CHID_S) & \ + FW_CRYPTO_LOOKASIDE_WR_RX_CHID_M) + +#define FW_CRYPTO_LOOKASIDE_WR_LCB_S 27 +#define FW_CRYPTO_LOOKASIDE_WR_LCB_M 0x3 +#define FW_CRYPTO_LOOKASIDE_WR_LCB_V(x) \ + ((x) << FW_CRYPTO_LOOKASIDE_WR_LCB_S) +#define FW_CRYPTO_LOOKASIDE_WR_LCB_G(x) \ + (((x) >> FW_CRYPTO_LOOKASIDE_WR_LCB_S) & FW_CRYPTO_LOOKASIDE_WR_LCB_M) + +#define FW_CRYPTO_LOOKASIDE_WR_PHASH_S 25 +#define FW_CRYPTO_LOOKASIDE_WR_PHASH_M 0x3 +#define FW_CRYPTO_LOOKASIDE_WR_PHASH_V(x) \ + ((x) << FW_CRYPTO_LOOKASIDE_WR_PHASH_S) +#define FW_CRYPTO_LOOKASIDE_WR_PHASH_G(x) \ + (((x) >> FW_CRYPTO_LOOKASIDE_WR_PHASH_S) & \ + FW_CRYPTO_LOOKASIDE_WR_PHASH_M) + +#define FW_CRYPTO_LOOKASIDE_WR_IV_S 23 +#define FW_CRYPTO_LOOKASIDE_WR_IV_M 0x3 +#define FW_CRYPTO_LOOKASIDE_WR_IV_V(x) \ + ((x) << FW_CRYPTO_LOOKASIDE_WR_IV_S) +#define FW_CRYPTO_LOOKASIDE_WR_IV_G(x) \ + (((x) >> FW_CRYPTO_LOOKASIDE_WR_IV_S) & FW_CRYPTO_LOOKASIDE_WR_IV_M) + +#define FW_CRYPTO_LOOKASIDE_WR_TX_CH_S 10 +#define FW_CRYPTO_LOOKASIDE_WR_TX_CH_M 0x3 +#define FW_CRYPTO_LOOKASIDE_WR_TX_CH_V(x) \ + ((x) << FW_CRYPTO_LOOKASIDE_WR_TX_CH_S) +#define FW_CRYPTO_LOOKASIDE_WR_TX_CH_G(x) \ + (((x) >> FW_CRYPTO_LOOKASIDE_WR_TX_CH_S) & \ + FW_CRYPTO_LOOKASIDE_WR_TX_CH_M) + +#define FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_S 0 +#define FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_M 0x3ff +#define FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_V(x) \ + ((x) << FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_S) +#define FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_G(x) \ + (((x) >> FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_S) & \ + FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID_M) + +#define FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_S 24 +#define FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_M 0xff +#define FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_V(x) \ + ((x) << FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_S) +#define FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_G(x) \ + (((x) >> FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_S) & \ + FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE_M) + +#define FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_S 17 +#define FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_M 0x7f +#define FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(x) \ + ((x) << FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_S) +#define FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_G(x) \ + (((x) >> FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_S) & \ + FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_M) + #endif /* 
_T4FW_INTERFACE_H_ */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index afb5daa3721d..05bd19f9ebc5 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -18,6 +18,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
+#include <linux/of_platform.h>
 #include <linux/platform_device.h>
 #include <linux/vmalloc.h>
 
@@ -2781,6 +2782,89 @@ static struct platform_driver g_dsaf_driver = {
 
 module_platform_driver(g_dsaf_driver);
 
+/**
+ * hns_dsaf_roce_reset - reset dsaf and roce
+ * @dsaf_fwnode: Pointer to framework node for the dsaf
+ * @enable: false - request reset, true - drop reset
+ * Return: 0 - success, negative - fail
+ */
+int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool enable)
+{
+	struct dsaf_device *dsaf_dev;
+	struct platform_device *pdev;
+	u32 mp;
+	u32 sl;
+	u32 credit;
+	int i;
+	const u32 port_map[DSAF_ROCE_CREDIT_CHN][DSAF_ROCE_CHAN_MODE_NUM] = {
+		{DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0},
+		{DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0},
+		{DSAF_ROCE_PORT_2, DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0},
+		{DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0},
+		{DSAF_ROCE_PORT_4, DSAF_ROCE_PORT_2, DSAF_ROCE_PORT_1},
+		{DSAF_ROCE_PORT_4, DSAF_ROCE_PORT_2, DSAF_ROCE_PORT_1},
+		{DSAF_ROCE_PORT_5, DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1},
+		{DSAF_ROCE_PORT_5, DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1},
+	};
+	const u32 sl_map[DSAF_ROCE_CREDIT_CHN][DSAF_ROCE_CHAN_MODE_NUM] = {
+		{DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_0},
+		{DSAF_ROCE_SL_0, DSAF_ROCE_SL_1, DSAF_ROCE_SL_1},
+		{DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_2},
+		{DSAF_ROCE_SL_0, DSAF_ROCE_SL_1, DSAF_ROCE_SL_3},
+		{DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_0},
+		{DSAF_ROCE_SL_1, DSAF_ROCE_SL_1, DSAF_ROCE_SL_1},
+		{DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_2},
+		{DSAF_ROCE_SL_1, DSAF_ROCE_SL_1, DSAF_ROCE_SL_3},
+	};
+
+	if (!is_of_node(dsaf_fwnode)) {
+		pr_err("hisi_dsaf: Only support DT node!\n");
+		return -EINVAL;
+	}
+	pdev = of_find_device_by_node(to_of_node(dsaf_fwnode));
+	dsaf_dev = dev_get_drvdata(&pdev->dev);
+	if (AE_IS_VER1(dsaf_dev->dsaf_ver)) {
+		dev_err(dsaf_dev->dev, "%s v1 chip doesn't support RoCE!\n",
+			dsaf_dev->ae_dev.name);
+		return -ENODEV;
+	}
+
+	if (!enable) {
+		/* Reset rocee-channels in dsaf and rocee */
+		hns_dsaf_srst_chns(dsaf_dev, DSAF_CHNS_MASK, false);
+		hns_dsaf_roce_srst(dsaf_dev, false);
+	} else {
+		/* Configure dsaf tx roce according to port map and sl map */
+		mp = dsaf_read_dev(dsaf_dev, DSAF_ROCE_PORT_MAP_REG);
+		for (i = 0; i < DSAF_ROCE_CREDIT_CHN; i++)
+			dsaf_set_field(mp, 7 << i * 3, i * 3,
+				       port_map[i][DSAF_ROCE_6PORT_MODE]);
+		dsaf_set_field(mp, 3 << i * 3, i * 3, 0);
+		dsaf_write_dev(dsaf_dev, DSAF_ROCE_PORT_MAP_REG, mp);
+
+		sl = dsaf_read_dev(dsaf_dev, DSAF_ROCE_SL_MAP_REG);
+		for (i = 0; i < DSAF_ROCE_CREDIT_CHN; i++)
+			dsaf_set_field(sl, 3 << i * 2, i * 2,
+				       sl_map[i][DSAF_ROCE_6PORT_MODE]);
+		dsaf_write_dev(dsaf_dev, DSAF_ROCE_SL_MAP_REG, sl);
+
+		/* De-reset rocee-channels in dsaf and rocee */
+		hns_dsaf_srst_chns(dsaf_dev, DSAF_CHNS_MASK, true);
+		msleep(SRST_TIME_INTERVAL);
+		hns_dsaf_roce_srst(dsaf_dev, true);
+
+		/* Enable dsaf channel rocee credit */
+		credit = dsaf_read_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG);
+		dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 0);
+		dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit);
+
+		dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 1);
+		dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit);
+	}
+	return 0;
+}
+EXPORT_SYMBOL(hns_dsaf_roce_reset);
+
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
 MODULE_DESCRIPTION("HNS DSAF driver");
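
A hedged sketch of how a consumer such as the hns RoCE driver would be expected to drive this new export during its own reset sequence (the dsaf_fwnode and ret locals are hypothetical; this patch only adds the helper itself):

	/* illustrative caller, e.g. from a RoCE engine reset path */
	ret = hns_dsaf_roce_reset(dsaf_fwnode, false);	/* request reset */
	if (ret)
		return ret;
	/* ... reinitialize the RoCE engine while it is held in reset ... */
	ret = hns_dsaf_roce_reset(dsaf_fwnode, true);	/* drop reset, remap, re-credit */
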
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
index 1daf018d9071..f3681d566ae6 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -43,6 +43,32 @@ struct hns_mac_cb;
 #define DSAF_PRIO_NR	8
 #define DSAF_REG_PER_ZONE	3
 
+#define DSAF_ROCE_CREDIT_CHN	8
+#define DSAF_ROCE_CHAN_MODE	3
+
+enum dsaf_roce_port_mode {
+	DSAF_ROCE_6PORT_MODE,
+	DSAF_ROCE_4PORT_MODE,
+	DSAF_ROCE_2PORT_MODE,
+	DSAF_ROCE_CHAN_MODE_NUM,
+};
+
+enum dsaf_roce_port_num {
+	DSAF_ROCE_PORT_0,
+	DSAF_ROCE_PORT_1,
+	DSAF_ROCE_PORT_2,
+	DSAF_ROCE_PORT_3,
+	DSAF_ROCE_PORT_4,
+	DSAF_ROCE_PORT_5,
+};
+
+enum dsaf_roce_qos_sl {
+	DSAF_ROCE_SL_0,
+	DSAF_ROCE_SL_1,
+	DSAF_ROCE_SL_2,
+	DSAF_ROCE_SL_3,
+};
+
 #define DSAF_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
 #define HNS_DSAF_IS_DEBUG(dev) (dev->dsaf_mode == DSAF_MODE_DISABLE_SP)
 
@@ -419,6 +445,10 @@ int hns_dsaf_get_mac_entry_by_index(
 
 void hns_dsaf_fix_mac_mode(struct hns_mac_cb *mac_cb);
 
+void hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool enable);
+
+void hns_dsaf_roce_srst(struct dsaf_device *dsaf_dev, bool enable);
+
 int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev);
 void hns_dsaf_ae_uninit(struct dsaf_device *dsaf_dev);
 
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index 611b67b6f450..36b9f791cf2f 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -231,6 +231,42 @@ static void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev,
 	dsaf_write_sub(dsaf_dev, reg_addr, reg_val);
 }
 
+/**
+ * hns_dsaf_srst_chns - reset dsaf channels
+ * @dsaf_dev: dsaf device struct pointer
+ * @msk: xbar channels mask value:
+ *	bit0-5 for xge0-5
+ *	bit6-11 for ppe0-5
+ *	bit12-17 for roce0-5
+ *	bit18-19 for com/dfx
+ * @enable: false - request reset, true - drop reset
+ */
+void hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool enable)
+{
+	u32 reg_addr;
+
+	if (!enable)
+		reg_addr = DSAF_SUB_SC_DSAF_RESET_REQ_REG;
+	else
+		reg_addr = DSAF_SUB_SC_DSAF_RESET_DREQ_REG;
+
+	dsaf_write_sub(dsaf_dev, reg_addr, msk);
+}
+
+void hns_dsaf_roce_srst(struct dsaf_device *dsaf_dev, bool enable)
+{
+	if (!enable) {
+		dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_ROCEE_RESET_REQ_REG, 1);
+	} else {
+		dsaf_write_sub(dsaf_dev,
+			       DSAF_SUB_SC_ROCEE_CLK_DIS_REG, 1);
+		dsaf_write_sub(dsaf_dev,
+			       DSAF_SUB_SC_ROCEE_RESET_DREQ_REG, 1);
+		msleep(20);
+		dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_ROCEE_CLK_EN_REG, 1);
+	}
+}
+
 static void hns_dsaf_xge_core_srst_by_port_acpi(struct dsaf_device *dsaf_dev,
 						u32 port, bool dereset)
 
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index 235f74444b1d..13c16ab7be48 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -77,6 +77,12 @@
 #define DSAF_SUB_SC_PPE_RESET_DREQ_REG			0xA4C
 #define DSAF_SUB_SC_RCB_PPE_COM_RESET_REQ_REG		0xA88
 #define DSAF_SUB_SC_RCB_PPE_COM_RESET_DREQ_REG		0xA8C
+#define DSAF_SUB_SC_DSAF_RESET_REQ_REG			0xAA8
+#define DSAF_SUB_SC_ROCEE_RESET_REQ_REG			0xA50
+#define DSAF_SUB_SC_DSAF_RESET_DREQ_REG 0xAAC +#define DSAF_SUB_SC_ROCEE_CLK_DIS_REG 0x32C +#define DSAF_SUB_SC_ROCEE_RESET_DREQ_REG 0xA54 +#define DSAF_SUB_SC_ROCEE_CLK_EN_REG 0x328 #define DSAF_SUB_SC_LIGHT_MODULE_DETECT_EN_REG 0x2060 #define DSAF_SUB_SC_TCAM_MBIST_EN_REG 0x2300 #define DSAF_SUB_SC_DSAF_CLK_ST_REG 0x5300 @@ -133,6 +139,8 @@ #define DSAF_ROCEE_INT_STS_0_REG 0x200 #define DSAFV2_SERDES_LBK_0_REG 0x220 #define DSAF_PAUSE_CFG_REG 0x240 +#define DSAF_ROCE_PORT_MAP_REG 0x2A0 +#define DSAF_ROCE_SL_MAP_REG 0x2A4 #define DSAF_PPE_QID_CFG_0_REG 0x300 #define DSAF_SW_PORT_TYPE_0_REG 0x320 #define DSAF_STP_PORT_TYPE_0_REG 0x340 @@ -178,6 +186,7 @@ #define DSAF_SBM_BP_CFG_2_XGE_REG_0_REG 0x200C #define DSAF_SBM_BP_CFG_2_PPE_REG_0_REG 0x230C #define DSAF_SBM_BP_CFG_2_ROCEE_REG_0_REG 0x260C +#define DSAF_SBM_ROCEE_CFG_REG_REG 0x2380 #define DSAFV2_SBM_BP_CFG_2_ROCEE_REG_0_REG 0x238C #define DSAF_SBM_FREE_CNT_0_0_REG 0x2010 #define DSAF_SBM_FREE_CNT_1_0_REG 0x2014 @@ -796,6 +805,9 @@ #define DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_S 9 #define DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_M (((1ULL << 9) - 1) << 9) +#define DSAF_CHNS_MASK 0x3f000 +#define DSAF_SBM_ROCEE_CFG_CRD_EN_B 2 +#define SRST_TIME_INTERVAL 20 #define DSAFV2_SBM_CFG2_ROCEE_SET_BUF_NUM_S 0 #define DSAFV2_SBM_CFG2_ROCEE_SET_BUF_NUM_M (((1ULL << 8) - 1) << 0) #define DSAFV2_SBM_CFG2_ROCEE_RESET_BUF_NUM_S 8 diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 88f3c85fb04a..b942108c85c1 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -203,7 +203,8 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter, struct device *dev = &adapter->vdev->dev; dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr); - send_request_unmap(adapter, ltb->map_id); + if (!adapter->failover) + send_request_unmap(adapter, ltb->map_id); } static int alloc_rx_pool(struct ibmvnic_adapter *adapter, @@ -522,7 +523,8 @@ static int ibmvnic_close(struct net_device *netdev) for (i = 0; i < adapter->req_rx_queues; i++) napi_disable(&adapter->napi[i]); - netif_tx_stop_all_queues(netdev); + if (!adapter->failover) + netif_tx_stop_all_queues(netdev); if (adapter->bounce_buffer) { if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) { @@ -3280,6 +3282,10 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, rc = ibmvnic_send_crq_init(adapter); if (rc) dev_err(dev, "Error sending init rc=%ld\n", rc); + } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) { + dev_info(dev, "Backing device failover detected\n"); + netif_carrier_off(netdev); + adapter->failover = true; } else { /* The adapter lost the connection */ dev_err(dev, "Virtual Adapter failed (rc=%d)\n", @@ -3615,8 +3621,18 @@ static void handle_crq_init_rsp(struct work_struct *work) struct device *dev = &adapter->vdev->dev; struct net_device *netdev = adapter->netdev; unsigned long timeout = msecs_to_jiffies(30000); + bool restart = false; int rc; + if (adapter->failover) { + release_sub_crqs(adapter); + if (netif_running(netdev)) { + netif_tx_disable(netdev); + ibmvnic_close(netdev); + restart = true; + } + } + send_version_xchg(adapter); reinit_completion(&adapter->init_done); if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { @@ -3645,6 +3661,17 @@ static void handle_crq_init_rsp(struct work_struct *work) netdev->real_num_tx_queues = adapter->req_tx_queues; + if (adapter->failover) { + adapter->failover = false; + if (restart) { + rc = ibmvnic_open(netdev); + if (rc) + goto restart_failed; + } + 
netif_carrier_on(netdev); + return; + } + rc = register_netdev(netdev); if (rc) { dev_err(dev, @@ -3655,6 +3682,8 @@ static void handle_crq_init_rsp(struct work_struct *work) return; +restart_failed: + dev_err(dev, "Failed to restart ibmvnic, rc=%d\n", rc); register_failed: release_sub_crqs(adapter); task_failed: @@ -3692,6 +3721,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) dev_set_drvdata(&dev->dev, netdev); adapter->vdev = dev; adapter->netdev = netdev; + adapter->failover = false; ether_addr_copy(adapter->mac_addr, mac_addr_p); ether_addr_copy(netdev->dev_addr, adapter->mac_addr); diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index e82898fd518e..bfc84c7d0e11 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -830,6 +830,7 @@ enum ibmvfc_crq_format { IBMVNIC_CRQ_INIT = 0x01, IBMVNIC_CRQ_INIT_COMPLETE = 0x02, IBMVNIC_PARTITION_MIGRATED = 0x06, + IBMVNIC_DEVICE_FAILOVER = 0x08, }; struct ibmvnic_crq_queue { @@ -1047,4 +1048,5 @@ struct ibmvnic_adapter { u8 map_id; struct work_struct vnic_crq_init; + bool failover; }; diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h index 199ff98209cf..acf06051e111 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.h +++ b/drivers/net/ethernet/intel/igb/e1000_82575.h @@ -188,6 +188,11 @@ struct e1000_adv_tx_context_desc { /* ETQF register bit definitions */ #define E1000_ETQF_FILTER_ENABLE BIT(26) #define E1000_ETQF_1588 BIT(30) +#define E1000_ETQF_IMM_INT BIT(29) +#define E1000_ETQF_QUEUE_ENABLE BIT(31) +#define E1000_ETQF_QUEUE_SHIFT 16 +#define E1000_ETQF_QUEUE_MASK 0x00070000 +#define E1000_ETQF_ETYPE_MASK 0x0000FFFF /* FTQF register bit definitions */ #define E1000_FTQF_VF_BP 0x00008000 diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index 2997c443c5dc..2688180a7acd 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -1024,4 +1024,8 @@ #define E1000_RTTBCNRC_RF_INT_MASK \ (E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT) +#define E1000_VLAPQF_QUEUE_SEL(_n, q_idx) (q_idx << ((_n) * 4)) +#define E1000_VLAPQF_P_VALID(_n) (0x1 << (3 + (_n) * 4)) +#define E1000_VLAPQF_QUEUE_MASK 0x03 + #endif diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h index 21d9d02885cb..d84afdd83e53 100644 --- a/drivers/net/ethernet/intel/igb/e1000_regs.h +++ b/drivers/net/ethernet/intel/igb/e1000_regs.h @@ -309,6 +309,7 @@ (0x054E0 + ((_i - 16) * 8))) #define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \ (0x054E4 + ((_i - 16) * 8))) +#define E1000_VLAPQF 0x055B0 /* VLAN Priority Queue Filter VLAPQF */ #define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8)) #define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4)) #define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4)) diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index 5387b3a96489..03fbe4b7663b 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -350,11 +350,49 @@ struct hwmon_buff { }; #endif +/* The number of L2 ether-type filter registers, Index 3 is reserved + * for PTP 1588 timestamp + */ +#define MAX_ETYPE_FILTER (4 - 1) +/* ETQF filter list: one static filter per filter consumer. This is + * to avoid filter collisions later. Add new filters here!! 
+ * + * Current filters: Filter 3 + */ +#define IGB_ETQF_FILTER_1588 3 + #define IGB_N_EXTTS 2 #define IGB_N_PEROUT 2 #define IGB_N_SDP 4 #define IGB_RETA_SIZE 128 +enum igb_filter_match_flags { + IGB_FILTER_FLAG_ETHER_TYPE = 0x1, + IGB_FILTER_FLAG_VLAN_TCI = 0x2, +}; + +#define IGB_MAX_RXNFC_FILTERS 16 + +/* RX network flow classification data structure */ +struct igb_nfc_input { + /* Byte layout in order, all values with MSB first: + * match_flags - 1 byte + * etype - 2 bytes + * vlan_tci - 2 bytes + */ + u8 match_flags; + __be16 etype; + __be16 vlan_tci; +}; + +struct igb_nfc_filter { + struct hlist_node nfc_node; + struct igb_nfc_input filter; + u16 etype_reg_index; + u16 sw_idx; + u16 action; +}; + /* board specific private data structure */ struct igb_adapter { unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; @@ -473,6 +511,13 @@ struct igb_adapter { int copper_tries; struct e1000_info ei; u16 eee_advert; + + /* RX network flow classification support */ + struct hlist_head nfc_filter_list; + unsigned int nfc_filter_count; + /* lock for RX network flow classification filter */ + spinlock_t nfc_lock; + bool etype_bitmap[MAX_ETYPE_FILTER]; }; /* flags controlling PTP/1588 function */ @@ -599,4 +644,9 @@ static inline struct netdev_queue *txring_txq(const struct igb_ring *tx_ring) return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index); } +int igb_add_filter(struct igb_adapter *adapter, + struct igb_nfc_filter *input); +int igb_erase_filter(struct igb_adapter *adapter, + struct igb_nfc_filter *input); + #endif /* _IGB_H_ */ diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 64e91c575a39..0c33eca7c832 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -2431,6 +2431,63 @@ static int igb_get_ts_info(struct net_device *dev, } } +#define ETHER_TYPE_FULL_MASK ((__force __be16)~0) +static int igb_get_ethtool_nfc_entry(struct igb_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = &cmd->fs; + struct igb_nfc_filter *rule = NULL; + + /* report total rule count */ + cmd->data = IGB_MAX_RXNFC_FILTERS; + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) { + if (fsp->location <= rule->sw_idx) + break; + } + + if (!rule || fsp->location != rule->sw_idx) + return -EINVAL; + + if (rule->filter.match_flags) { + fsp->flow_type = ETHER_FLOW; + fsp->ring_cookie = rule->action; + if (rule->filter.match_flags & IGB_FILTER_FLAG_ETHER_TYPE) { + fsp->h_u.ether_spec.h_proto = rule->filter.etype; + fsp->m_u.ether_spec.h_proto = ETHER_TYPE_FULL_MASK; + } + if (rule->filter.match_flags & IGB_FILTER_FLAG_VLAN_TCI) { + fsp->flow_type |= FLOW_EXT; + fsp->h_ext.vlan_tci = rule->filter.vlan_tci; + fsp->m_ext.vlan_tci = htons(VLAN_PRIO_MASK); + } + return 0; + } + return -EINVAL; +} + +static int igb_get_ethtool_nfc_all(struct igb_adapter *adapter, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct igb_nfc_filter *rule; + int cnt = 0; + + /* report total rule count */ + cmd->data = IGB_MAX_RXNFC_FILTERS; + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) { + if (cnt == cmd->rule_cnt) + return -EMSGSIZE; + rule_locs[cnt] = rule->sw_idx; + cnt++; + } + + cmd->rule_cnt = cnt; + + return 0; +} + static int igb_get_rss_hash_opts(struct igb_adapter *adapter, struct ethtool_rxnfc *cmd) { @@ -2484,6 +2541,16 @@ static int igb_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, cmd->data = adapter->num_rx_queues; 
ret = 0; break; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = adapter->nfc_filter_count; + ret = 0; + break; + case ETHTOOL_GRXCLSRULE: + ret = igb_get_ethtool_nfc_entry(adapter, cmd); + break; + case ETHTOOL_GRXCLSRLALL: + ret = igb_get_ethtool_nfc_all(adapter, cmd, rule_locs); + break; case ETHTOOL_GRXFH: ret = igb_get_rss_hash_opts(adapter, cmd); break; @@ -2598,6 +2665,279 @@ static int igb_set_rss_hash_opt(struct igb_adapter *adapter, return 0; } +static int igb_rxnfc_write_etype_filter(struct igb_adapter *adapter, + struct igb_nfc_filter *input) +{ + struct e1000_hw *hw = &adapter->hw; + u8 i; + u32 etqf; + u16 etype; + + /* find an empty etype filter register */ + for (i = 0; i < MAX_ETYPE_FILTER; ++i) { + if (!adapter->etype_bitmap[i]) + break; + } + if (i == MAX_ETYPE_FILTER) { + dev_err(&adapter->pdev->dev, "ethtool -N: etype filters are all used.\n"); + return -EINVAL; + } + + adapter->etype_bitmap[i] = true; + + etqf = rd32(E1000_ETQF(i)); + etype = ntohs(input->filter.etype & ETHER_TYPE_FULL_MASK); + + etqf |= E1000_ETQF_FILTER_ENABLE; + etqf &= ~E1000_ETQF_ETYPE_MASK; + etqf |= (etype & E1000_ETQF_ETYPE_MASK); + + etqf &= ~E1000_ETQF_QUEUE_MASK; + etqf |= ((input->action << E1000_ETQF_QUEUE_SHIFT) + & E1000_ETQF_QUEUE_MASK); + etqf |= E1000_ETQF_QUEUE_ENABLE; + + wr32(E1000_ETQF(i), etqf); + + input->etype_reg_index = i; + + return 0; +} + +int igb_rxnfc_write_vlan_prio_filter(struct igb_adapter *adapter, + struct igb_nfc_filter *input) +{ + struct e1000_hw *hw = &adapter->hw; + u8 vlan_priority; + u16 queue_index; + u32 vlapqf; + + vlapqf = rd32(E1000_VLAPQF); + vlan_priority = (ntohs(input->filter.vlan_tci) & VLAN_PRIO_MASK) + >> VLAN_PRIO_SHIFT; + queue_index = (vlapqf >> (vlan_priority * 4)) & E1000_VLAPQF_QUEUE_MASK; + + /* check whether this vlan prio is already set */ + if ((vlapqf & E1000_VLAPQF_P_VALID(vlan_priority)) && + (queue_index != input->action)) { + dev_err(&adapter->pdev->dev, "ethtool rxnfc set vlan prio filter failed.\n"); + return -EEXIST; + } + + vlapqf |= E1000_VLAPQF_P_VALID(vlan_priority); + vlapqf |= E1000_VLAPQF_QUEUE_SEL(vlan_priority, input->action); + + wr32(E1000_VLAPQF, vlapqf); + + return 0; +} + +int igb_add_filter(struct igb_adapter *adapter, struct igb_nfc_filter *input) +{ + int err = -EINVAL; + + if (input->filter.match_flags & IGB_FILTER_FLAG_ETHER_TYPE) { + err = igb_rxnfc_write_etype_filter(adapter, input); + if (err) + return err; + } + + if (input->filter.match_flags & IGB_FILTER_FLAG_VLAN_TCI) + err = igb_rxnfc_write_vlan_prio_filter(adapter, input); + + return err; +} + +static void igb_clear_etype_filter_regs(struct igb_adapter *adapter, + u16 reg_index) +{ + struct e1000_hw *hw = &adapter->hw; + u32 etqf = rd32(E1000_ETQF(reg_index)); + + etqf &= ~E1000_ETQF_QUEUE_ENABLE; + etqf &= ~E1000_ETQF_QUEUE_MASK; + etqf &= ~E1000_ETQF_FILTER_ENABLE; + + wr32(E1000_ETQF(reg_index), etqf); + + adapter->etype_bitmap[reg_index] = false; +} + +static void igb_clear_vlan_prio_filter(struct igb_adapter *adapter, + u16 vlan_tci) +{ + struct e1000_hw *hw = &adapter->hw; + u8 vlan_priority; + u32 vlapqf; + + vlan_priority = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; + + vlapqf = rd32(E1000_VLAPQF); + vlapqf &= ~E1000_VLAPQF_P_VALID(vlan_priority); + vlapqf &= ~E1000_VLAPQF_QUEUE_SEL(vlan_priority, + E1000_VLAPQF_QUEUE_MASK); + + wr32(E1000_VLAPQF, vlapqf); +} + +int igb_erase_filter(struct igb_adapter *adapter, struct igb_nfc_filter *input) +{ + if (input->filter.match_flags & IGB_FILTER_FLAG_ETHER_TYPE) + 
igb_clear_etype_filter_regs(adapter, + input->etype_reg_index); + + if (input->filter.match_flags & IGB_FILTER_FLAG_VLAN_TCI) + igb_clear_vlan_prio_filter(adapter, + ntohs(input->filter.vlan_tci)); + + return 0; +} + +static int igb_update_ethtool_nfc_entry(struct igb_adapter *adapter, + struct igb_nfc_filter *input, + u16 sw_idx) +{ + struct igb_nfc_filter *rule, *parent; + int err = -EINVAL; + + parent = NULL; + rule = NULL; + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) { + /* hash found, or no matching entry */ + if (rule->sw_idx >= sw_idx) + break; + parent = rule; + } + + /* if there is an old rule occupying our place remove it */ + if (rule && (rule->sw_idx == sw_idx)) { + if (!input) + err = igb_erase_filter(adapter, rule); + + hlist_del(&rule->nfc_node); + kfree(rule); + adapter->nfc_filter_count--; + } + + /* If no input this was a delete, err should be 0 if a rule was + * successfully found and removed from the list else -EINVAL + */ + if (!input) + return err; + + /* initialize node */ + INIT_HLIST_NODE(&input->nfc_node); + + /* add filter to the list */ + if (parent) + hlist_add_behind(&parent->nfc_node, &input->nfc_node); + else + hlist_add_head(&input->nfc_node, &adapter->nfc_filter_list); + + /* update counts */ + adapter->nfc_filter_count++; + + return 0; +} + +static int igb_add_ethtool_nfc_entry(struct igb_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct net_device *netdev = adapter->netdev; + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct igb_nfc_filter *input, *rule; + int err = 0; + + if (!(netdev->hw_features & NETIF_F_NTUPLE)) + return -EOPNOTSUPP; + + /* Don't allow programming if the action is a queue greater than + * the number of online Rx queues. + */ + if ((fsp->ring_cookie == RX_CLS_FLOW_DISC) || + (fsp->ring_cookie >= adapter->num_rx_queues)) { + dev_err(&adapter->pdev->dev, "ethtool -N: The specified action is invalid\n"); + return -EINVAL; + } + + /* Don't allow indexes to exist outside of available space */ + if (fsp->location >= IGB_MAX_RXNFC_FILTERS) { + dev_err(&adapter->pdev->dev, "Location out of range\n"); + return -EINVAL; + } + + if ((fsp->flow_type & ~FLOW_EXT) != ETHER_FLOW) + return -EINVAL; + + if (fsp->m_u.ether_spec.h_proto != ETHER_TYPE_FULL_MASK && + fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK)) + return -EINVAL; + + input = kzalloc(sizeof(*input), GFP_KERNEL); + if (!input) + return -ENOMEM; + + if (fsp->m_u.ether_spec.h_proto == ETHER_TYPE_FULL_MASK) { + input->filter.etype = fsp->h_u.ether_spec.h_proto; + input->filter.match_flags = IGB_FILTER_FLAG_ETHER_TYPE; + } + + if ((fsp->flow_type & FLOW_EXT) && fsp->m_ext.vlan_tci) { + if (fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK)) { + err = -EINVAL; + goto err_out; + } + input->filter.vlan_tci = fsp->h_ext.vlan_tci; + input->filter.match_flags |= IGB_FILTER_FLAG_VLAN_TCI; + } + + input->action = fsp->ring_cookie; + input->sw_idx = fsp->location; + + spin_lock(&adapter->nfc_lock); + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) { + if (!memcmp(&input->filter, &rule->filter, + sizeof(input->filter))) { + err = -EEXIST; + dev_err(&adapter->pdev->dev, + "ethtool: this filter is already set\n"); + goto err_out_w_lock; + } + } + + err = igb_add_filter(adapter, input); + if (err) + goto err_out_w_lock; + + igb_update_ethtool_nfc_entry(adapter, input, input->sw_idx); + + spin_unlock(&adapter->nfc_lock); + return 0; + +err_out_w_lock: + spin_unlock(&adapter->nfc_lock); +err_out: + kfree(input); + return err; +} 
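One detail worth calling out in igb_update_ethtool_nfc_entry() above: the filter list is kept sorted by sw_idx, which is what lets both the lookup in igb_get_ethtool_nfc_entry() and the GRXCLSRLALL dump walk it in location order. A stripped-down sketch of the pattern follows; the node type is hypothetical, and only <linux/types.h> and <linux/list.h> are assumed.

	struct node {
		struct hlist_node link;
		u16 idx;
	};

	static void insert_sorted(struct hlist_head *head, struct node *new)
	{
		struct node *cur, *parent = NULL;

		hlist_for_each_entry(cur, head, link) {
			if (cur->idx >= new->idx)
				break;			/* insertion point found */
			parent = cur;
		}
		/* Note the argument order: hlist_add_behind() takes the new
		 * node first, then the existing node to insert behind.
		 */
		if (parent)
			hlist_add_behind(&new->link, &parent->link);
		else
			hlist_add_head(&new->link, head);
	}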
+ +static int igb_del_ethtool_nfc_entry(struct igb_adapter *adapter, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + int err; + + spin_lock(&adapter->nfc_lock); + err = igb_update_ethtool_nfc_entry(adapter, NULL, fsp->location); + spin_unlock(&adapter->nfc_lock); + + return err; +} + static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) { struct igb_adapter *adapter = netdev_priv(dev); @@ -2607,6 +2947,11 @@ static int igb_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) case ETHTOOL_SRXFH: ret = igb_set_rss_hash_opt(adapter, cmd); break; + case ETHTOOL_SRXCLSRLINS: + ret = igb_add_ethtool_nfc_entry(adapter, cmd); + break; + case ETHTOOL_SRXCLSRLDEL: + ret = igb_del_ethtool_nfc_entry(adapter, cmd); default: break; } diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 942a89fb0090..af75eac5fa16 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -176,6 +176,8 @@ static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, static int igb_ndo_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi); static void igb_check_vf_rate_limit(struct igb_adapter *); +static void igb_nfc_filter_exit(struct igb_adapter *adapter); +static void igb_nfc_filter_restore(struct igb_adapter *adapter); #ifdef CONFIG_PCI_IOV static int igb_vf_configure(struct igb_adapter *adapter, int vf); @@ -1611,6 +1613,7 @@ static void igb_configure(struct igb_adapter *adapter) igb_setup_mrqc(adapter); igb_setup_rctl(adapter); + igb_nfc_filter_restore(adapter); igb_configure_tx(adapter); igb_configure_rx(adapter); @@ -2059,6 +2062,21 @@ static int igb_set_features(struct net_device *netdev, if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE))) return 0; + if (!(features & NETIF_F_NTUPLE)) { + struct hlist_node *node2; + struct igb_nfc_filter *rule; + + spin_lock(&adapter->nfc_lock); + hlist_for_each_entry_safe(rule, node2, + &adapter->nfc_filter_list, nfc_node) { + igb_erase_filter(adapter, rule); + hlist_del(&rule->nfc_node); + kfree(rule); + } + spin_unlock(&adapter->nfc_lock); + adapter->nfc_filter_count = 0; + } + netdev->features = features; if (netif_running(netdev)) @@ -3053,6 +3071,7 @@ static int igb_sw_init(struct igb_adapter *adapter) VLAN_HLEN; adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; + spin_lock_init(&adapter->nfc_lock); spin_lock_init(&adapter->stats64_lock); #ifdef CONFIG_PCI_IOV switch (hw->mac.type) { @@ -3240,6 +3259,8 @@ static int __igb_close(struct net_device *netdev, bool suspending) igb_down(adapter); igb_free_irq(adapter); + igb_nfc_filter_exit(adapter); + igb_free_all_tx_resources(adapter); igb_free_all_rx_resources(adapter); @@ -8306,4 +8327,28 @@ int igb_reinit_queues(struct igb_adapter *adapter) return err; } + +static void igb_nfc_filter_exit(struct igb_adapter *adapter) +{ + struct igb_nfc_filter *rule; + + spin_lock(&adapter->nfc_lock); + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) + igb_erase_filter(adapter, rule); + + spin_unlock(&adapter->nfc_lock); +} + +static void igb_nfc_filter_restore(struct igb_adapter *adapter) +{ + struct igb_nfc_filter *rule; + + spin_lock(&adapter->nfc_lock); + + hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node) + igb_add_filter(adapter, rule); + + spin_unlock(&adapter->nfc_lock); +} /* igb_main.c */ diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c 
b/drivers/net/ethernet/intel/igb/igb_ptp.c index 336c103ae374..66dfa2085cc7 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -998,12 +998,12 @@ static int igb_ptp_set_timestamp_mode(struct igb_adapter *adapter, /* define ethertype filter for timestamped packets */ if (is_l2) - wr32(E1000_ETQF(3), + wr32(E1000_ETQF(IGB_ETQF_FILTER_1588), (E1000_ETQF_FILTER_ENABLE | /* enable filter */ E1000_ETQF_1588 | /* enable timestamping */ ETH_P_1588)); /* 1588 eth protocol type */ else - wr32(E1000_ETQF(3), 0); + wr32(E1000_ETQF(IGB_ETQF_FILTER_1588), 0); /* L4 Queue Filter[3]: filter by destination port and protocol */ if (is_l4) { diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c index 87b7b814778b..712d8bcb7d8c 100644 --- a/drivers/net/ethernet/nuvoton/w90p910_ether.c +++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c @@ -751,7 +751,7 @@ static void netdev_rx(struct net_device *dev) dev_err(&pdev->dev, "rx crc err\n"); ether->stats.rx_crc_errors++; } else if (status & RXDS_ALIE) { - dev_err(&pdev->dev, "rx aligment err\n"); + dev_err(&pdev->dev, "rx alignment err\n"); ether->stats.rx_frame_errors++; } else if (status & RXDS_PTLE) { dev_err(&pdev->dev, "rx longer err\n"); diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 6e17ee10e748..f6b8899cda7f 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -414,7 +414,7 @@ static int qede_set_link_ksettings(struct net_device *dev, } params.adv_speeds = QED_LM_40000baseLR4_Full_BIT; break; - case 0xdead: + case SPEED_50000: if (!(current_link.supported_caps & QED_LM_50000baseKR2_Full_BIT)) { DP_INFO(edev, "50G speed not supported\n"); @@ -422,7 +422,7 @@ static int qede_set_link_ksettings(struct net_device *dev, } params.adv_speeds = QED_LM_50000baseKR2_Full_BIT; break; - case 0xbeef: + case SPEED_100000: if (!(current_link.supported_caps & QED_LM_100000baseKR4_Full_BIT)) { DP_INFO(edev, "100G speed not supported\n"); @@ -672,7 +672,7 @@ static int qede_set_pauseparam(struct net_device *dev, memset(¶ms, 0, sizeof(params)); params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG; if (epause->autoneg) { - if (!(current_link.supported_caps & SUPPORTED_Autoneg)) { + if (!(current_link.supported_caps & QED_LM_Autoneg_BIT)) { DP_INFO(edev, "autoneg not supported\n"); return -EINVAL; } diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index b8c9f1884a15..2a70b1a39d80 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -646,7 +646,6 @@ static int efx_ef10_probe(struct efx_nic *efx) rc = efx_ef10_get_timer_config(efx); if (rc < 0) goto fail5; - efx->timer_quantum_ns = 1536000 / rc; /* 1536 cycles */ rc = efx_mcdi_mon_probe(efx); if (rc && rc != -EPERM) diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index d66133bf3eb5..1c3e07c3d0b8 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -15,152 +15,218 @@ if PHYLIB config SWPHY bool -comment "MII PHY device drivers" - -config AQUANTIA_PHY - tristate "Drivers for the Aquantia PHYs" - ---help--- - Currently supports the Aquantia AQ1202, AQ2104, AQR105, AQR405 +comment "MDIO bus device drivers" -config AT803X_PHY - tristate "Drivers for Atheros AT803X PHYs" - ---help--- - Currently supports the AT8030 and AT8035 model +config MDIO_BCM_IPROC + tristate "Broadcom iProc MDIO bus controller" + 
depends on ARCH_BCM_IPROC || COMPILE_TEST + depends on HAS_IOMEM && OF_MDIO + help + This module provides a driver for the MDIO busses found in the + Broadcom iProc SoC's. -config AMD_PHY - tristate "Drivers for the AMD PHYs" - ---help--- - Currently supports the am79c874 +config MDIO_BCM_UNIMAC + tristate "Broadcom UniMAC MDIO bus controller" + depends on HAS_IOMEM + help + This module provides a driver for the Broadcom UniMAC MDIO busses. + This hardware can be found in the Broadcom GENET Ethernet MAC + controllers as well as some Broadcom Ethernet switches such as the + Starfighter 2 switches. -config MARVELL_PHY - tristate "Drivers for Marvell PHYs" - ---help--- - Currently has a driver for the 88E1011S - -config DAVICOM_PHY - tristate "Drivers for Davicom PHYs" - ---help--- - Currently supports dm9161e and dm9131 +config MDIO_BITBANG + tristate "Bitbanged MDIO buses" + help + This module implements the MDIO bus protocol in software, + for use by low level drivers that export the ability to + drive the relevant pins. -config QSEMI_PHY - tristate "Drivers for Quality Semiconductor PHYs" - ---help--- - Currently supports the qs6612 + If in doubt, say N. -config LXT_PHY - tristate "Drivers for the Intel LXT PHYs" - ---help--- - Currently supports the lxt970, lxt971 +config MDIO_BUS_MUX + tristate + depends on OF_MDIO + help + This module provides a driver framework for MDIO bus + multiplexers which connect one of several child MDIO busses + to a parent bus. Switching between child busses is done by + device specific drivers. -config CICADA_PHY - tristate "Drivers for the Cicada PHYs" - ---help--- - Currently supports the cis8204 +config MDIO_BUS_MUX_BCM_IPROC + tristate "Broadcom iProc based MDIO bus multiplexers" + depends on OF && OF_MDIO && (ARCH_BCM_IPROC || COMPILE_TEST) + select MDIO_BUS_MUX + default ARCH_BCM_IPROC + help + This module provides a driver for MDIO bus multiplexers found in + iProc based Broadcom SoCs. This multiplexer connects one of several + child MDIO bus to a parent bus. Buses could be internal as well as + external and selection logic lies inside the same multiplexer. -config VITESSE_PHY - tristate "Drivers for the Vitesse PHYs" - ---help--- - Currently supports the vsc8244 +config MDIO_BUS_MUX_GPIO + tristate "GPIO controlled MDIO bus multiplexers" + depends on OF_GPIO && OF_MDIO + select MDIO_BUS_MUX + help + This module provides a driver for MDIO bus multiplexers that + are controlled via GPIO lines. The multiplexer connects one of + several child MDIO busses to a parent bus. Child bus + selection is under the control of GPIO lines. -config TERANETICS_PHY - tristate "Drivers for the Teranetics PHYs" - ---help--- - Currently supports the Teranetics TN2020 +config MDIO_BUS_MUX_MMIOREG + tristate "MMIO device-controlled MDIO bus multiplexers" + depends on OF_MDIO && HAS_IOMEM + select MDIO_BUS_MUX + help + This module provides a driver for MDIO bus multiplexers that + are controlled via a simple memory-mapped device, like an FPGA. + The multiplexer connects one of several child MDIO busses to a + parent bus. Child bus selection is under the control of one of + the FPGA's registers. -config SMSC_PHY - tristate "Drivers for SMSC PHYs" - ---help--- - Currently supports the LAN83C185, LAN8187 and LAN8700 PHYs + Currently, only 8-bit registers are supported. 
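The common thread in the MDIO_BUS_MUX_* entries above and below is that the hardware-specific driver only supplies a bus-switch callback to the mdio-mux core. A hedged sketch of that callback, assuming the four-argument mdio_mux_init() of this kernel generation and a made-up single-register selector:

	#include <linux/io.h>
	#include <linux/mdio-mux.h>

	struct my_mux {
		void __iomem *sel_reg;	/* hypothetical child-bus select register */
	};

	static int my_mux_switch_fn(int current_child, int desired_child, void *data)
	{
		struct my_mux *mux = data;

		if (current_child == desired_child)
			return 0;	/* parent bus already routed to this child */

		writel(desired_child, mux->sel_reg);
		return 0;
	}

	/* In probe(), after mapping sel_reg:
	 *	err = mdio_mux_init(dev, my_mux_switch_fn, &mux_handle, mux);
	 */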
-config BCM_NET_PHYLIB +config MDIO_CAVIUM tristate -config BROADCOM_PHY - tristate "Drivers for Broadcom PHYs" - select BCM_NET_PHYLIB +config MDIO_GPIO + tristate "GPIO lib-based bitbanged MDIO buses" + depends on MDIO_BITBANG && GPIOLIB ---help--- - Currently supports the BCM5411, BCM5421, BCM5461, BCM54616S, BCM5464, - BCM5481 and BCM5482 PHYs. + Supports GPIO lib-based MDIO busses. -config BCM_CYGNUS_PHY - tristate "Drivers for Broadcom Cygnus SoC internal PHY" - depends on ARCH_BCM_CYGNUS || COMPILE_TEST - depends on MDIO_BCM_IPROC - select BCM_NET_PHYLIB + To compile this driver as a module, choose M here: the module + will be called mdio-gpio. + +config MDIO_HISI_FEMAC + tristate "Hisilicon FEMAC MDIO bus controller" + depends on HAS_IOMEM && OF_MDIO + help + This module provides a driver for the MDIO busses found in the + Hisilicon SoC that have an Fast Ethernet MAC. + +config MDIO_MOXART + tristate "MOXA ART MDIO interface support" + depends on ARCH_MOXART + help + This driver supports the MDIO interface found in the network + interface units of the MOXA ART SoC + +config MDIO_OCTEON + tristate "Octeon and some ThunderX SOCs MDIO buses" + depends on 64BIT + depends on HAS_IOMEM + select MDIO_CAVIUM + help + This module provides a driver for the Octeon and ThunderX MDIO + buses. It is required by the Octeon and ThunderX ethernet device + drivers on some systems. + +config MDIO_SUN4I + tristate "Allwinner sun4i MDIO interface support" + depends on ARCH_SUNXI + help + This driver supports the MDIO interface found in the network + interface units of the Allwinner SoC that have an EMAC (A10, + A12, A10s, etc.) + +config MDIO_THUNDER + tristate "ThunderX SOCs MDIO buses" + depends on 64BIT + depends on PCI + select MDIO_CAVIUM + help + This driver supports the MDIO interfaces found on Cavium + ThunderX SoCs when the MDIO bus device appears as a PCI + device. + +config MDIO_XGENE + tristate "APM X-Gene SoC MDIO bus controller" + help + This module provides a driver for the MDIO busses found in the + APM X-Gene SoC's. + +comment "MII PHY device drivers" + +config AMD_PHY + tristate "AMD PHYs" ---help--- - This PHY driver is for the 1G internal PHYs of the Broadcom - Cygnus Family SoC. + Currently supports the am79c874 - Currently supports internal PHY's used in the BCM11300, - BCM11320, BCM11350, BCM11360, BCM58300, BCM58302, - BCM58303 & BCM58305 Broadcom Cygnus SoCs. +config AQUANTIA_PHY + tristate "Aquantia PHYs" + ---help--- + Currently supports the Aquantia AQ1202, AQ2104, AQR105, AQR405 + +config AT803X_PHY + tristate "AT803X PHYs" + ---help--- + Currently supports the AT8030 and AT8035 model config BCM63XX_PHY - tristate "Drivers for Broadcom 63xx SOCs internal PHY" + tristate "Broadcom 63xx SOCs internal PHY" depends on BCM63XX select BCM_NET_PHYLIB ---help--- Currently supports the 6348 and 6358 PHYs. config BCM7XXX_PHY - tristate "Drivers for Broadcom 7xxx SOCs internal PHYs" + tristate "Broadcom 7xxx SOCs internal PHYs" select BCM_NET_PHYLIB ---help--- Currently supports the BCM7366, BCM7439, BCM7445, and 40nm and 65nm generation of BCM7xxx Set Top Box SoCs. config BCM87XX_PHY - tristate "Driver for Broadcom BCM8706 and BCM8727 PHYs" + tristate "Broadcom BCM8706 and BCM8727 PHYs" help Currently supports the BCM8706 and BCM8727 10G Ethernet PHYs. 
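The PHY driver entries being reshuffled here are all front ends to the same phylib boilerplate. For reference, a minimal driver of this vintage looks roughly like the sketch below; the IDs and name are invented, and the genphy_* helpers are the stock phylib implementations:

	#include <linux/module.h>
	#include <linux/phy.h>

	static struct phy_driver example_phy_driver[] = { {
		.phy_id		= 0x00112230,	/* hypothetical OUI/model */
		.phy_id_mask	= 0xfffffff0,
		.name		= "Example Gigabit PHY",
		.features	= PHY_GBIT_FEATURES,
		.config_init	= genphy_config_init,
		.config_aneg	= genphy_config_aneg,
		.read_status	= genphy_read_status,
	} };
	module_phy_driver(example_phy_driver);

	MODULE_LICENSE("GPL");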
-config ICPLUS_PHY - tristate "Drivers for ICPlus PHYs" +config BCM_CYGNUS_PHY + tristate "Broadcom Cygnus SoC internal PHY" + depends on ARCH_BCM_CYGNUS || COMPILE_TEST + depends on MDIO_BCM_IPROC + select BCM_NET_PHYLIB ---help--- - Currently supports the IP175C and IP1001 PHYs. + This PHY driver is for the 1G internal PHYs of the Broadcom + Cygnus Family SoC. -config REALTEK_PHY - tristate "Drivers for Realtek PHYs" - ---help--- - Supports the Realtek 821x PHY. + Currently supports internal PHY's used in the BCM11300, + BCM11320, BCM11350, BCM11360, BCM58300, BCM58302, + BCM58303 & BCM58305 Broadcom Cygnus SoCs. -config NATIONAL_PHY - tristate "Drivers for National Semiconductor PHYs" - ---help--- - Currently supports the DP83865 PHY. +config BCM_NET_PHYLIB + tristate -config STE10XP - tristate "Driver for STMicroelectronics STe10Xp PHYs" +config BROADCOM_PHY + tristate "Broadcom PHYs" + select BCM_NET_PHYLIB ---help--- - This is the driver for the STe100p and STe101p PHYs. + Currently supports the BCM5411, BCM5421, BCM5461, BCM54616S, BCM5464, + BCM5481 and BCM5482 PHYs. -config LSI_ET1011C_PHY - tristate "Driver for LSI ET1011C PHY" +config CICADA_PHY + tristate "Cicada PHYs" ---help--- - Supports the LSI ET1011C PHY. + Currently supports the cis8204 -config MICREL_PHY - tristate "Driver for Micrel PHYs" +config DAVICOM_PHY + tristate "Davicom PHYs" ---help--- - Supports the KSZ9021, VSC8201, KS8001 PHYs. + Currently supports dm9161e and dm9131 config DP83848_PHY - tristate "Driver for Texas Instruments DP83848 PHY" + tristate "Texas Instruments DP83848 PHY" ---help--- Supports the DP83848 PHY. config DP83867_PHY - tristate "Drivers for Texas Instruments DP83867 Gigabit PHY" + tristate "Texas Instruments DP83867 Gigabit PHY" ---help--- Currently supports the DP83867 PHY. -config MICROCHIP_PHY - tristate "Drivers for Microchip PHYs" - help - Supports the LAN88XX PHYs. - config FIXED_PHY - tristate "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs" + tristate "MDIO Bus/PHY emulation with fixed speed/link PHYs" depends on PHYLIB select SWPHY ---help--- @@ -169,148 +235,83 @@ config FIXED_PHY Currently tested with mpc866ads and mpc8349e-mitx. -config MDIO_BITBANG - tristate "Support for bitbanged MDIO buses" - help - This module implements the MDIO bus protocol in software, - for use by low level drivers that export the ability to - drive the relevant pins. - - If in doubt, say N. - -config MDIO_GPIO - tristate "Support for GPIO lib-based bitbanged MDIO buses" - depends on MDIO_BITBANG && GPIOLIB +config ICPLUS_PHY + tristate "ICPlus PHYs" ---help--- - Supports GPIO lib-based MDIO busses. - - To compile this driver as a module, choose M here: the module - will be called mdio-gpio. - -config MDIO_CAVIUM - tristate - -config MDIO_OCTEON - tristate "Support for MDIO buses on Octeon and some ThunderX SOCs" - depends on 64BIT - depends on HAS_IOMEM - select MDIO_CAVIUM - help - This module provides a driver for the Octeon and ThunderX MDIO - buses. It is required by the Octeon and ThunderX ethernet device - drivers on some systems. - -config MDIO_THUNDER - tristate "Support for MDIO buses on ThunderX SOCs" - depends on 64BIT - depends on PCI - select MDIO_CAVIUM - help - This driver supports the MDIO interfaces found on Cavium - ThunderX SoCs when the MDIO bus device appears as a PCI - device. + Currently supports the IP175C and IP1001 PHYs. +config INTEL_XWAY_PHY + tristate "Intel XWAY PHYs" + ---help--- + Supports the Intel XWAY (former Lantiq) 11G and 22E PHYs. 
+ These PHYs are marked as standalone chips under the names + PEF 7061, PEF 7071 and PEF 7072 or integrated into the Intel + SoCs xRX200, xRX300, xRX330, xRX350 and xRX550. -config MDIO_SUN4I - tristate "Allwinner sun4i MDIO interface support" - depends on ARCH_SUNXI - help - This driver supports the MDIO interface found in the network - interface units of the Allwinner SoC that have an EMAC (A10, - A12, A10s, etc.) +config LSI_ET1011C_PHY + tristate "LSI ET1011C PHY" + ---help--- + Supports the LSI ET1011C PHY. -config MDIO_MOXART - tristate "MOXA ART MDIO interface support" - depends on ARCH_MOXART - help - This driver supports the MDIO interface found in the network - interface units of the MOXA ART SoC +config LXT_PHY + tristate "Intel LXT PHYs" + ---help--- + Currently supports the lxt970, lxt971 -config MDIO_BUS_MUX - tristate - depends on OF_MDIO - help - This module provides a driver framework for MDIO bus - multiplexers which connect one of several child MDIO busses - to a parent bus. Switching between child busses is done by - device specific drivers. +config MARVELL_PHY + tristate "Marvell PHYs" + ---help--- + Currently has a driver for the 88E1011S -config MDIO_BUS_MUX_GPIO - tristate "Support for GPIO controlled MDIO bus multiplexers" - depends on OF_GPIO && OF_MDIO - select MDIO_BUS_MUX - help - This module provides a driver for MDIO bus multiplexers that - are controlled via GPIO lines. The multiplexer connects one of - several child MDIO busses to a parent bus. Child bus - selection is under the control of GPIO lines. +config MICREL_PHY + tristate "Micrel PHYs" + ---help--- + Supports the KSZ9021, VSC8201, KS8001 PHYs. -config MDIO_BUS_MUX_MMIOREG - tristate "Support for MMIO device-controlled MDIO bus multiplexers" - depends on OF_MDIO && HAS_IOMEM - select MDIO_BUS_MUX +config MICROCHIP_PHY + tristate "Microchip PHYs" help - This module provides a driver for MDIO bus multiplexers that - are controlled via a simple memory-mapped device, like an FPGA. - The multiplexer connects one of several child MDIO busses to a - parent bus. Child bus selection is under the control of one of - the FPGA's registers. + Supports the LAN88XX PHYs. - Currently, only 8-bit registers are supported. +config MICROSEMI_PHY + tristate "Microsemi PHYs" + ---help--- + Currently supports the VSC8531 and VSC8541 PHYs -config MDIO_BUS_MUX_BCM_IPROC - tristate "Support for iProc based MDIO bus multiplexers" - depends on OF && OF_MDIO && (ARCH_BCM_IPROC || COMPILE_TEST) - select MDIO_BUS_MUX - default ARCH_BCM_IPROC - help - This module provides a driver for MDIO bus multiplexers found in - iProc based Broadcom SoCs. This multiplexer connects one of several - child MDIO bus to a parent bus. Buses could be internal as well as - external and selection logic lies inside the same multiplexer. +config NATIONAL_PHY + tristate "National Semiconductor PHYs" + ---help--- + Currently supports the DP83865 PHY. -config MDIO_BCM_UNIMAC - tristate "Broadcom UniMAC MDIO bus controller" - depends on HAS_IOMEM - help - This module provides a driver for the Broadcom UniMAC MDIO busses. - This hardware can be found in the Broadcom GENET Ethernet MAC - controllers as well as some Broadcom Ethernet switches such as the - Starfighter 2 switches. 
+config QSEMI_PHY + tristate "Quality Semiconductor PHYs" + ---help--- + Currently supports the qs6612 -config MDIO_BCM_IPROC - tristate "Broadcom iProc MDIO bus controller" - depends on ARCH_BCM_IPROC || COMPILE_TEST - depends on HAS_IOMEM && OF_MDIO - help - This module provides a driver for the MDIO busses found in the - Broadcom iProc SoC's. +config REALTEK_PHY + tristate "Realtek PHYs" + ---help--- + Supports the Realtek 821x PHY. -config INTEL_XWAY_PHY - tristate "Driver for Intel XWAY PHYs" +config SMSC_PHY + tristate "SMSC PHYs" ---help--- - Supports the Intel XWAY (former Lantiq) 11G and 22E PHYs. - These PHYs are marked as standalone chips under the names - PEF 7061, PEF 7071 and PEF 7072 or integrated into the Intel - SoCs xRX200, xRX300, xRX330, xRX350 and xRX550. + Currently supports the LAN83C185, LAN8187 and LAN8700 PHYs -config MDIO_HISI_FEMAC - tristate "Hisilicon FEMAC MDIO bus controller" - depends on HAS_IOMEM && OF_MDIO - help - This module provides a driver for the MDIO busses found in the - Hisilicon SoC that have an Fast Ethernet MAC. +config STE10XP + tristate "STMicroelectronics STe10Xp PHYs" + ---help--- + This is the driver for the STe100p and STe101p PHYs. -config MDIO_XGENE - tristate "APM X-Gene SoC MDIO bus controller" - help - This module provides a driver for the MDIO busses found in the - APM X-Gene SoC's. +config TERANETICS_PHY + tristate "Teranetics PHYs" + ---help--- + Currently supports the Teranetics TN2020 -config MICROSEMI_PHY - tristate "Drivers for the Microsemi PHYs" - ---help--- - Currently supports the VSC8531 and VSC8541 PHYs +config VITESSE_PHY + tristate "Vitesse PHYs" + ---help--- + Currently supports the vsc8244 config XILINX_GMII2RGMII tristate "Xilinx GMII2RGMII converter driver" diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index 73d65ce04454..e58667d111e7 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -1,53 +1,55 @@ -# Makefile for Linux PHY drivers +# Makefile for Linux PHY drivers and MDIO bus drivers libphy-y := phy.o phy_device.o mdio_bus.o mdio_device.o libphy-$(CONFIG_SWPHY) += swphy.o obj-$(CONFIG_PHYLIB) += libphy.o + +obj-$(CONFIG_MDIO_BCM_IPROC) += mdio-bcm-iproc.o +obj-$(CONFIG_MDIO_BCM_UNIMAC) += mdio-bcm-unimac.o +obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o +obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o +obj-$(CONFIG_MDIO_BUS_MUX_BCM_IPROC) += mdio-mux-bcm-iproc.o +obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o +obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o +obj-$(CONFIG_MDIO_CAVIUM) += mdio-cavium.o +obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o +obj-$(CONFIG_MDIO_HISI_FEMAC) += mdio-hisi-femac.o +obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o +obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o +obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o +obj-$(CONFIG_MDIO_THUNDER) += mdio-thunder.o +obj-$(CONFIG_MDIO_XGENE) += mdio-xgene.o + +obj-$(CONFIG_AMD_PHY) += amd.o obj-$(CONFIG_AQUANTIA_PHY) += aquantia.o -obj-$(CONFIG_MARVELL_PHY) += marvell.o -obj-$(CONFIG_DAVICOM_PHY) += davicom.o -obj-$(CONFIG_CICADA_PHY) += cicada.o -obj-$(CONFIG_LXT_PHY) += lxt.o -obj-$(CONFIG_QSEMI_PHY) += qsemi.o -obj-$(CONFIG_SMSC_PHY) += smsc.o -obj-$(CONFIG_MICROSEMI_PHY) += mscc.o -obj-$(CONFIG_TERANETICS_PHY) += teranetics.o -obj-$(CONFIG_VITESSE_PHY) += vitesse.o -obj-$(CONFIG_BCM_NET_PHYLIB) += bcm-phy-lib.o -obj-$(CONFIG_BROADCOM_PHY) += broadcom.o +obj-$(CONFIG_AT803X_PHY) += at803x.o obj-$(CONFIG_BCM63XX_PHY) += bcm63xx.o obj-$(CONFIG_BCM7XXX_PHY) += bcm7xxx.o obj-$(CONFIG_BCM87XX_PHY) += bcm87xx.o 
obj-$(CONFIG_BCM_CYGNUS_PHY) += bcm-cygnus.o -obj-$(CONFIG_ICPLUS_PHY) += icplus.o -obj-$(CONFIG_REALTEK_PHY) += realtek.o -obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o -obj-$(CONFIG_FIXED_PHY) += fixed_phy.o -obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o -obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o -obj-$(CONFIG_NATIONAL_PHY) += national.o +obj-$(CONFIG_BCM_NET_PHYLIB) += bcm-phy-lib.o +obj-$(CONFIG_BROADCOM_PHY) += broadcom.o +obj-$(CONFIG_CICADA_PHY) += cicada.o +obj-$(CONFIG_DAVICOM_PHY) += davicom.o obj-$(CONFIG_DP83640_PHY) += dp83640.o obj-$(CONFIG_DP83848_PHY) += dp83848.o obj-$(CONFIG_DP83867_PHY) += dp83867.o -obj-$(CONFIG_STE10XP) += ste10Xp.o -obj-$(CONFIG_MICREL_PHY) += micrel.o -obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o -obj-$(CONFIG_MDIO_THUNDER) += mdio-thunder.o -obj-$(CONFIG_MDIO_CAVIUM) += mdio-cavium.o +obj-$(CONFIG_FIXED_PHY) += fixed_phy.o +obj-$(CONFIG_ICPLUS_PHY) += icplus.o +obj-$(CONFIG_INTEL_XWAY_PHY) += intel-xway.o +obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o +obj-$(CONFIG_LXT_PHY) += lxt.o +obj-$(CONFIG_MARVELL_PHY) += marvell.o obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o -obj-$(CONFIG_AT803X_PHY) += at803x.o -obj-$(CONFIG_AMD_PHY) += amd.o -obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o -obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o -obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o -obj-$(CONFIG_MDIO_BUS_MUX_BCM_IPROC) += mdio-mux-bcm-iproc.o -obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o -obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o -obj-$(CONFIG_MDIO_BCM_UNIMAC) += mdio-bcm-unimac.o +obj-$(CONFIG_MICREL_PHY) += micrel.o obj-$(CONFIG_MICROCHIP_PHY) += microchip.o -obj-$(CONFIG_MDIO_BCM_IPROC) += mdio-bcm-iproc.o -obj-$(CONFIG_INTEL_XWAY_PHY) += intel-xway.o -obj-$(CONFIG_MDIO_HISI_FEMAC) += mdio-hisi-femac.o -obj-$(CONFIG_MDIO_XGENE) += mdio-xgene.o +obj-$(CONFIG_MICROSEMI_PHY) += mscc.o +obj-$(CONFIG_NATIONAL_PHY) += national.o +obj-$(CONFIG_QSEMI_PHY) += qsemi.o +obj-$(CONFIG_REALTEK_PHY) += realtek.o +obj-$(CONFIG_SMSC_PHY) += smsc.o +obj-$(CONFIG_STE10XP) += ste10Xp.o +obj-$(CONFIG_TERANETICS_PHY) += teranetics.o +obj-$(CONFIG_VITESSE_PHY) += vitesse.o obj-$(CONFIG_XILINX_GMII2RGMII) += xilinx_gmii2rgmii.o diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h index 84d6cbdd11b2..3a562683603c 100644 --- a/drivers/net/xen-netback/common.h +++ b/drivers/net/xen-netback/common.h @@ -412,4 +412,8 @@ u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len, void xenvif_set_skb_hash(struct xenvif *vif, struct sk_buff *skb); +#ifdef CONFIG_DEBUG_FS +void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m); +#endif + #endif /* __XEN_NETBACK__COMMON_H__ */ diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c index fb87cb39a56b..282b16d8093a 100644 --- a/drivers/net/xen-netback/hash.c +++ b/drivers/net/xen-netback/hash.c @@ -369,6 +369,74 @@ u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len, return XEN_NETIF_CTRL_STATUS_SUCCESS; } +#ifdef CONFIG_DEBUG_FS +void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m) +{ + unsigned int i; + + switch (vif->hash.alg) { + case XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ: + seq_puts(m, "Hash Algorithm: TOEPLITZ\n"); + break; + + case XEN_NETIF_CTRL_HASH_ALGORITHM_NONE: + seq_puts(m, "Hash Algorithm: NONE\n"); + /* FALLTHRU */ + default: + return; + } + + if (vif->hash.flags) { + seq_puts(m, "\nHash Flags:\n"); + + if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4) + seq_puts(m, "- IPv4\n"); + if (vif->hash.flags & 
XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP) + seq_puts(m, "- IPv4 + TCP\n"); + if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6) + seq_puts(m, "- IPv6\n"); + if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP) + seq_puts(m, "- IPv6 + TCP\n"); + } + + seq_puts(m, "\nHash Key:\n"); + + for (i = 0; i < XEN_NETBK_MAX_HASH_KEY_SIZE; ) { + unsigned int j, n; + + n = 8; + if (i + n >= XEN_NETBK_MAX_HASH_KEY_SIZE) + n = XEN_NETBK_MAX_HASH_KEY_SIZE - i; + + seq_printf(m, "[%2u - %2u]: ", i, i + n - 1); + + for (j = 0; j < n; j++, i++) + seq_printf(m, "%02x ", vif->hash.key[i]); + + seq_puts(m, "\n"); + } + + if (vif->hash.size != 0) { + seq_puts(m, "\nHash Mapping:\n"); + + for (i = 0; i < vif->hash.size; ) { + unsigned int j, n; + + n = 8; + if (i + n >= vif->hash.size) + n = vif->hash.size - i; + + seq_printf(m, "[%4u - %4u]: ", i, i + n - 1); + + for (j = 0; j < n; j++, i++) + seq_printf(m, "%4u ", vif->hash.mapping[i]); + + seq_puts(m, "\n"); + } + } +} +#endif /* CONFIG_DEBUG_FS */ + void xenvif_init_hash(struct xenvif *vif) { if (xenvif_hash_cache_size == 0) diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index 6a31f2610c23..bacf6e0c12b9 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@ -165,7 +165,7 @@ xenvif_write_io_ring(struct file *filp, const char __user *buf, size_t count, return count; } -static int xenvif_dump_open(struct inode *inode, struct file *filp) +static int xenvif_io_ring_open(struct inode *inode, struct file *filp) { int ret; void *queue = NULL; @@ -179,13 +179,35 @@ static int xenvif_dump_open(struct inode *inode, struct file *filp) static const struct file_operations xenvif_dbg_io_ring_ops_fops = { .owner = THIS_MODULE, - .open = xenvif_dump_open, + .open = xenvif_io_ring_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .write = xenvif_write_io_ring, }; +static int xenvif_read_ctrl(struct seq_file *m, void *v) +{ + struct xenvif *vif = m->private; + + xenvif_dump_hash_info(vif, m); + + return 0; +} + +static int xenvif_ctrl_open(struct inode *inode, struct file *filp) +{ + return single_open(filp, xenvif_read_ctrl, inode->i_private); +} + +static const struct file_operations xenvif_dbg_ctrl_ops_fops = { + .owner = THIS_MODULE, + .open = xenvif_ctrl_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + static void xenvif_debugfs_addif(struct xenvif *vif) { struct dentry *pfile; @@ -210,6 +232,17 @@ static void xenvif_debugfs_addif(struct xenvif *vif) pr_warn("Creation of io_ring file returned %ld!\n", PTR_ERR(pfile)); } + + if (vif->ctrl_task) { + pfile = debugfs_create_file("ctrl", + S_IRUSR, + vif->xenvif_dbg_root, + vif, + &xenvif_dbg_ctrl_ops_fops); + if (IS_ERR_OR_NULL(pfile)) + pr_warn("Creation of ctrl file returned %ld!\n", + PTR_ERR(pfile)); + } } else netdev_warn(vif->dev, "Creation of vif debugfs dir returned %ld!\n", diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h index 3db25df396cb..8eeedb2db924 100644 --- a/include/linux/bcma/bcma.h +++ b/include/linux/bcma/bcma.h @@ -205,6 +205,9 @@ struct bcma_host_ops { #define BCMA_PKG_ID_BCM4709 0 #define BCMA_CHIP_ID_BCM47094 53030 #define BCMA_CHIP_ID_BCM53018 53018 +#define BCMA_CHIP_ID_BCM53573 53573 +#define BCMA_PKG_ID_BCM53573 0 +#define BCMA_PKG_ID_BCM47189 1 /* Board types (on PCI usually equals to the subsystem dev id) */ /* BCM4313 */ diff --git a/include/linux/bcma/bcma_regs.h b/include/linux/bcma/bcma_regs.h index ebd5c1fcdea4..c607fce6aadd 100644 --- 
a/include/linux/bcma/bcma_regs.h +++ b/include/linux/bcma/bcma_regs.h @@ -23,6 +23,7 @@ #define BCMA_CLKCTLST_4328A0_HAVEALP 0x00020000 /* 4328a0 has reversed bits */ /* Agent registers (common for every core) */ +#define BCMA_OOB_SEL_OUT_A30 0x0100 #define BCMA_IOCTL 0x0408 /* IO control */ #define BCMA_IOCTL_CLK 0x0001 #define BCMA_IOCTL_FGC 0x0002 diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index a5f6ce6b578c..49d4aef1f789 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -81,6 +81,7 @@ static inline bool is_vlan_dev(const struct net_device *dev) #define skb_vlan_tag_present(__skb) ((__skb)->vlan_tci & VLAN_TAG_PRESENT) #define skb_vlan_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT) #define skb_vlan_tag_get_id(__skb) ((__skb)->vlan_tci & VLAN_VID_MASK) +#define skb_vlan_tag_get_prio(__skb) ((__skb)->vlan_tci & VLAN_PRIO_MASK) /** * struct vlan_pcpu_stats - VLAN percpu rx/tx stats diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 3ed7d20e3811..d8dc5c2243d5 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -318,9 +318,11 @@ struct qed_link_params { struct qed_link_output { bool link_up; - u32 supported_caps; /* In SUPPORTED defs */ - u32 advertised_caps; /* In ADVERTISED defs */ - u32 lp_caps; /* In ADVERTISED defs */ + /* In QED_LM_* defs */ + u32 supported_caps; + u32 advertised_caps; + u32 lp_caps; + u32 speed; /* In Mb/s */ u8 duplex; /* In DUPLEX defs */ u8 port; /* In PORT defs */ diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index 3eef0802a0cd..8b72ee710f95 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -173,7 +173,7 @@ struct rhashtable_walker { struct rhashtable_iter { struct rhashtable *ht; struct rhash_head *p; - struct rhashtable_walker *walker; + struct rhashtable_walker walker; unsigned int slot; unsigned int skip; }; @@ -346,8 +346,8 @@ struct bucket_table *rhashtable_insert_slow(struct rhashtable *ht, struct bucket_table *old_tbl); int rhashtable_insert_rehash(struct rhashtable *ht, struct bucket_table *tbl); -int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter, - gfp_t gfp); +void rhashtable_walk_enter(struct rhashtable *ht, + struct rhashtable_iter *iter); void rhashtable_walk_exit(struct rhashtable_iter *iter); int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU); void *rhashtable_walk_next(struct rhashtable_iter *iter); @@ -906,4 +906,12 @@ static inline int rhashtable_replace_fast( return err; } +/* Obsolete function, do not use in new code. 
*/ +static inline int rhashtable_walk_init(struct rhashtable *ht, + struct rhashtable_iter *iter, gfp_t gfp) +{ + rhashtable_walk_enter(ht, iter); + return 0; +} + #endif /* _LINUX_RHASHTABLE_H */ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 0f665cb26b50..7047448e8129 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -2295,7 +2295,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len) int ___pskb_trim(struct sk_buff *skb, unsigned int len); -static inline void __skb_trim(struct sk_buff *skb, unsigned int len) +static inline void __skb_set_length(struct sk_buff *skb, unsigned int len) { if (unlikely(skb_is_nonlinear(skb))) { WARN_ON(1); @@ -2305,6 +2305,11 @@ static inline void __skb_trim(struct sk_buff *skb, unsigned int len) skb_set_tail_pointer(skb, len); } +static inline void __skb_trim(struct sk_buff *skb, unsigned int len) +{ + __skb_set_length(skb, len); +} + void skb_trim(struct sk_buff *skb, unsigned int len); static inline int __pskb_trim(struct sk_buff *skb, unsigned int len) @@ -2335,6 +2340,20 @@ static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len) BUG_ON(err); } +static inline int __skb_grow(struct sk_buff *skb, unsigned int len) +{ + unsigned int diff = len - skb->len; + + if (skb_tailroom(skb) < diff) { + int ret = pskb_expand_head(skb, 0, diff - skb_tailroom(skb), + GFP_ATOMIC); + if (ret) + return ret; + } + __skb_set_length(skb, len); + return 0; +} + /** * skb_orphan - orphan a buffer * @skb: buffer to orphan @@ -2938,6 +2957,21 @@ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len) return __pskb_trim(skb, len); } +static inline int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len) +{ + if (skb->ip_summed == CHECKSUM_COMPLETE) + skb->ip_summed = CHECKSUM_NONE; + __skb_trim(skb, len); + return 0; +} + +static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len) +{ + if (skb->ip_summed == CHECKSUM_COMPLETE) + skb->ip_summed = CHECKSUM_NONE; + return __skb_grow(skb, len); +} + #define skb_queue_walk(queue, skb) \ for (skb = (queue)->next; \ skb != (struct sk_buff *)(queue); \ @@ -3726,6 +3760,13 @@ static inline bool skb_is_gso_v6(const struct sk_buff *skb) return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6; } +static inline void skb_gso_reset(struct sk_buff *skb) +{ + skb_shinfo(skb)->gso_size = 0; + skb_shinfo(skb)->gso_segs = 0; + skb_shinfo(skb)->gso_type = 0; +} + void __skb_warn_lro_forwarding(const struct sk_buff *skb); static inline bool skb_warn_if_lro(const struct sk_buff *skb) diff --git a/include/net/dsa.h b/include/net/dsa.h index 2217a3f817f8..d00c392bc9f8 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -386,4 +386,18 @@ static inline bool dsa_uses_tagged_protocol(struct dsa_switch_tree *dst) void dsa_unregister_switch(struct dsa_switch *ds); int dsa_register_switch(struct dsa_switch *ds, struct device_node *np); +#ifdef CONFIG_PM_SLEEP +int dsa_switch_suspend(struct dsa_switch *ds); +int dsa_switch_resume(struct dsa_switch *ds); +#else +static inline int dsa_switch_suspend(struct dsa_switch *ds) +{ + return 0; +} +static inline int dsa_switch_resume(struct dsa_switch *ds) +{ + return 0; +} +#endif /* CONFIG_PM_SLEEP */ + #endif diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h index d3d60dccd19f..f266b512c3bd 100644 --- a/include/net/flow_dissector.h +++ b/include/net/flow_dissector.h @@ -32,8 +32,13 @@ struct flow_dissector_key_basic { }; struct flow_dissector_key_tags { - u32 vlan_id:12, - 
flow_label:20; + u32 flow_label; +}; + +struct flow_dissector_key_vlan { + u16 vlan_id:12, + vlan_priority:3; + u16 padding; }; struct flow_dissector_key_keyid { @@ -119,7 +124,7 @@ enum flow_dissector_key_id { FLOW_DISSECTOR_KEY_PORTS, /* struct flow_dissector_key_ports */ FLOW_DISSECTOR_KEY_ETH_ADDRS, /* struct flow_dissector_key_eth_addrs */ FLOW_DISSECTOR_KEY_TIPC_ADDRS, /* struct flow_dissector_key_tipc_addrs */ - FLOW_DISSECTOR_KEY_VLANID, /* struct flow_dissector_key_flow_tags */ + FLOW_DISSECTOR_KEY_VLAN, /* struct flow_dissector_key_flow_vlan */ FLOW_DISSECTOR_KEY_FLOW_LABEL, /* struct flow_dissector_key_flow_tags */ FLOW_DISSECTOR_KEY_GRE_KEYID, /* struct flow_dissector_key_keyid */ FLOW_DISSECTOR_KEY_MPLS_ENTROPY, /* struct flow_dissector_key_keyid */ @@ -148,6 +153,7 @@ struct flow_keys { #define FLOW_KEYS_HASH_START_FIELD basic struct flow_dissector_key_basic basic; struct flow_dissector_key_tags tags; + struct flow_dissector_key_vlan vlan; struct flow_dissector_key_keyid keyid; struct flow_dissector_key_ports ports; struct flow_dissector_key_addrs addrs; diff --git a/include/net/sock.h b/include/net/sock.h index ff5be7e8ddea..2aab9b63bf16 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1114,6 +1114,16 @@ static inline bool sk_stream_is_writeable(const struct sock *sk) sk_stream_memory_free(sk); } +static inline int sk_under_cgroup_hierarchy(struct sock *sk, + struct cgroup *ancestor) +{ +#ifdef CONFIG_SOCK_CGROUP_DATA + return cgroup_is_descendant(sock_cgroup_ptr(&sk->sk_cgrp_data), + ancestor); +#else + return -ENOTSUPP; +#endif +} static inline bool sk_has_memory_pressure(const struct sock *sk) { diff --git a/include/net/tc_act/tc_vlan.h b/include/net/tc_act/tc_vlan.h index e29f52e8bdf1..6b835889ea30 100644 --- a/include/net/tc_act/tc_vlan.h +++ b/include/net/tc_act/tc_vlan.h @@ -20,6 +20,7 @@ struct tcf_vlan { int tcfv_action; u16 tcfv_push_vid; __be16 tcfv_push_proto; + u8 tcfv_push_prio; }; #define to_vlan(a) ((struct tcf_vlan *)a) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 866d53c33298..e4c5a1baa993 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -386,6 +386,17 @@ enum bpf_func_id { */ BPF_FUNC_current_task_under_cgroup, + /** + * bpf_skb_change_tail(skb, len, flags) + * The helper will resize the skb to the given new size, + * to be used f.e. with control messages. 
+ * @skb: pointer to skb + * @len: new skb length + * @flags: reserved + * Return: 0 on success or negative error + */ + BPF_FUNC_skb_change_tail, + __BPF_FUNC_MAX_ID, }; diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h index c186f64fffca..ab92bca6d448 100644 --- a/include/uapi/linux/if_bridge.h +++ b/include/uapi/linux/if_bridge.h @@ -140,7 +140,7 @@ struct bridge_vlan_xstats { __u64 tx_bytes; __u64 tx_packets; __u16 vid; - __u16 pad1; + __u16 flags; __u32 pad2; }; diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index d1c1ccaba787..51b5b247fb5a 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -428,6 +428,9 @@ enum { TCA_FLOWER_KEY_UDP_DST, /* be16 */ TCA_FLOWER_FLAGS, + TCA_FLOWER_KEY_VLAN_ID, + TCA_FLOWER_KEY_VLAN_PRIO, + TCA_FLOWER_KEY_VLAN_ETH_TYPE, __TCA_FLOWER_MAX, }; diff --git a/include/uapi/linux/tc_act/tc_vlan.h b/include/uapi/linux/tc_act/tc_vlan.h index 31151ff6264f..be72b6e3843b 100644 --- a/include/uapi/linux/tc_act/tc_vlan.h +++ b/include/uapi/linux/tc_act/tc_vlan.h @@ -29,6 +29,7 @@ enum { TCA_VLAN_PUSH_VLAN_ID, TCA_VLAN_PUSH_VLAN_PROTOCOL, TCA_VLAN_PAD, + TCA_VLAN_PUSH_VLAN_PRIORITY, __TCA_VLAN_MAX, }; #define TCA_VLAN_MAX (__TCA_VLAN_MAX - 1) diff --git a/include/uapi/linux/tipc_netlink.h b/include/uapi/linux/tipc_netlink.h index 5f3f6d09fb79..bcb65ef725f6 100644 --- a/include/uapi/linux/tipc_netlink.h +++ b/include/uapi/linux/tipc_netlink.h @@ -59,6 +59,7 @@ enum { TIPC_NL_MON_SET, TIPC_NL_MON_GET, TIPC_NL_MON_PEER_GET, + TIPC_NL_PEER_REMOVE, __TIPC_NL_CMD_MAX, TIPC_NL_CMD_MAX = __TIPC_NL_CMD_MAX - 1 diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 5ba520b544d7..97e3cf08142c 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -489,10 +489,9 @@ exit: EXPORT_SYMBOL_GPL(rhashtable_insert_slow); /** - * rhashtable_walk_init - Initialise an iterator + * rhashtable_walk_enter - Initialise an iterator * @ht: Table to walk over * @iter: Hash table Iterator - * @gfp: GFP flags for allocations * * This function prepares a hash table walk. * @@ -507,30 +506,22 @@ EXPORT_SYMBOL_GPL(rhashtable_insert_slow); * This function may sleep so you must not call it from interrupt * context or with spin locks held. * - * You must call rhashtable_walk_exit if this function returns - * successfully. + * You must call rhashtable_walk_exit after this function returns. 
*/ -int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter, - gfp_t gfp) +void rhashtable_walk_enter(struct rhashtable *ht, struct rhashtable_iter *iter) { iter->ht = ht; iter->p = NULL; iter->slot = 0; iter->skip = 0; - iter->walker = kmalloc(sizeof(*iter->walker), gfp); - if (!iter->walker) - return -ENOMEM; - spin_lock(&ht->lock); - iter->walker->tbl = + iter->walker.tbl = rcu_dereference_protected(ht->tbl, lockdep_is_held(&ht->lock)); - list_add(&iter->walker->list, &iter->walker->tbl->walkers); + list_add(&iter->walker.list, &iter->walker.tbl->walkers); spin_unlock(&ht->lock); - - return 0; } -EXPORT_SYMBOL_GPL(rhashtable_walk_init); +EXPORT_SYMBOL_GPL(rhashtable_walk_enter); /** * rhashtable_walk_exit - Free an iterator @@ -541,10 +532,9 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_init); void rhashtable_walk_exit(struct rhashtable_iter *iter) { spin_lock(&iter->ht->lock); - if (iter->walker->tbl) - list_del(&iter->walker->list); + if (iter->walker.tbl) + list_del(&iter->walker.list); spin_unlock(&iter->ht->lock); - kfree(iter->walker); } EXPORT_SYMBOL_GPL(rhashtable_walk_exit); @@ -570,12 +560,12 @@ int rhashtable_walk_start(struct rhashtable_iter *iter) rcu_read_lock(); spin_lock(&ht->lock); - if (iter->walker->tbl) - list_del(&iter->walker->list); + if (iter->walker.tbl) + list_del(&iter->walker.list); spin_unlock(&ht->lock); - if (!iter->walker->tbl) { - iter->walker->tbl = rht_dereference_rcu(ht->tbl, ht); + if (!iter->walker.tbl) { + iter->walker.tbl = rht_dereference_rcu(ht->tbl, ht); return -EAGAIN; } @@ -597,7 +587,7 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_start); */ void *rhashtable_walk_next(struct rhashtable_iter *iter) { - struct bucket_table *tbl = iter->walker->tbl; + struct bucket_table *tbl = iter->walker.tbl; struct rhashtable *ht = iter->ht; struct rhash_head *p = iter->p; @@ -630,8 +620,8 @@ next: /* Ensure we see any new tables. 
*/ smp_rmb(); - iter->walker->tbl = rht_dereference_rcu(tbl->future_tbl, ht); - if (iter->walker->tbl) { + iter->walker.tbl = rht_dereference_rcu(tbl->future_tbl, ht); + if (iter->walker.tbl) { iter->slot = 0; iter->skip = 0; return ERR_PTR(-EAGAIN); @@ -651,7 +641,7 @@ void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU) { struct rhashtable *ht; - struct bucket_table *tbl = iter->walker->tbl; + struct bucket_table *tbl = iter->walker.tbl; if (!tbl) goto out; @@ -660,9 +650,9 @@ void rhashtable_walk_stop(struct rhashtable_iter *iter) spin_lock(&ht->lock); if (tbl->rehash < tbl->size) - list_add(&iter->walker->list, &tbl->walkers); + list_add(&iter->walker.list, &tbl->walkers); else - iter->walker->tbl = NULL; + iter->walker.tbl = NULL; spin_unlock(&ht->lock); iter->p = NULL; diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index f2a29e467e78..872d4c0deb59 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -1245,14 +1245,30 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev) return 0; } -static size_t bridge_get_linkxstats_size(const struct net_device *dev) +static size_t br_get_linkxstats_size(const struct net_device *dev, int attr) { - struct net_bridge *br = netdev_priv(dev); + struct net_bridge_port *p = NULL; struct net_bridge_vlan_group *vg; struct net_bridge_vlan *v; + struct net_bridge *br; int numvls = 0; - vg = br_vlan_group(br); + switch (attr) { + case IFLA_STATS_LINK_XSTATS: + br = netdev_priv(dev); + vg = br_vlan_group(br); + break; + case IFLA_STATS_LINK_XSTATS_SLAVE: + p = br_port_get_rtnl(dev); + if (!p) + return 0; + br = p->br; + vg = nbp_vlan_group(p); + break; + default: + return 0; + } + if (vg) { /* we need to count all, even placeholder entries */ list_for_each_entry(v, &vg->vlan_list, vlist) @@ -1264,44 +1280,38 @@ static size_t bridge_get_linkxstats_size(const struct net_device *dev) nla_total_size(0); } -static size_t brport_get_linkxstats_size(const struct net_device *dev) -{ - return nla_total_size(sizeof(struct br_mcast_stats)) + - nla_total_size(0); -} - -static size_t br_get_linkxstats_size(const struct net_device *dev, int attr) +static int br_fill_linkxstats(struct sk_buff *skb, + const struct net_device *dev, + int *prividx, int attr) { - size_t retsize = 0; + struct nlattr *nla __maybe_unused; + struct net_bridge_port *p = NULL; + struct net_bridge_vlan_group *vg; + struct net_bridge_vlan *v; + struct net_bridge *br; + struct nlattr *nest; + int vl_idx = 0; switch (attr) { case IFLA_STATS_LINK_XSTATS: - retsize = bridge_get_linkxstats_size(dev); + br = netdev_priv(dev); + vg = br_vlan_group(br); break; case IFLA_STATS_LINK_XSTATS_SLAVE: - retsize = brport_get_linkxstats_size(dev); + p = br_port_get_rtnl(dev); + if (!p) + return 0; + br = p->br; + vg = nbp_vlan_group(p); break; + default: + return -EINVAL; } - return retsize; -} - -static int bridge_fill_linkxstats(struct sk_buff *skb, - const struct net_device *dev, - int *prividx) -{ - struct net_bridge *br = netdev_priv(dev); - struct nlattr *nla __maybe_unused; - struct net_bridge_vlan_group *vg; - struct net_bridge_vlan *v; - struct nlattr *nest; - int vl_idx = 0; - nest = nla_nest_start(skb, LINK_XSTATS_TYPE_BRIDGE); if (!nest) return -EMSGSIZE; - vg = br_vlan_group(br); if (vg) { list_for_each_entry(v, &vg->vlan_list, vlist) { struct bridge_vlan_xstats vxi; @@ -1311,6 +1321,7 @@ static int bridge_fill_linkxstats(struct sk_buff *skb, continue; memset(&vxi, 0, sizeof(vxi)); vxi.vid = v->vid; + vxi.flags = v->flags; 
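To summarize the lib/rhashtable.c interface change above: the walker is now embedded in the iterator, so rhashtable_walk_enter() cannot fail and callers lose their -ENOMEM handling. A sketch of the resulting walk loop, where my_obj is a stand-in for whatever the table stores:

	static void dump_table(struct rhashtable *ht)
	{
		struct rhashtable_iter iter;
		struct my_obj *obj;

		rhashtable_walk_enter(ht, &iter);	/* void: cannot fail */
		rhashtable_walk_start(&iter);		/* -EAGAIN only flags a resize */

		while ((obj = rhashtable_walk_next(&iter)) != NULL) {
			if (IS_ERR(obj)) {
				if (PTR_ERR(obj) == -EAGAIN)
					continue;	/* table resized under us */
				break;
			}
			/* ... inspect obj ... */
		}

		rhashtable_walk_stop(&iter);
		rhashtable_walk_exit(&iter);	/* still required: unlinks the walker */
	}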
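Looking ahead to the net/core/filter.c hunk below, the new bpf_skb_change_tail() helper is meant to be driven from a tc classifier program. A hedged sketch of such a caller, with the helper stub declared by hand in the style of the 4.8-era samples/bpf headers; the 128-byte target length is arbitrary but must stay within the min/max bounds the helper enforces:

	#include <linux/bpf.h>
	#include <linux/pkt_cls.h>

	#define SEC(NAME) __attribute__((section(NAME), used))

	static int (*bpf_skb_change_tail)(void *ctx, __u32 len, __u64 flags) =
		(void *) BPF_FUNC_skb_change_tail;

	SEC("classifier")
	int pad_to_128(struct __sk_buff *skb)
	{
		/* Grow short packets to 128 bytes; the new tail room is
		 * zero-filled by the kernel, and flags must currently be 0.
		 */
		if (skb->len < 128 && bpf_skb_change_tail(skb, 128, 0) < 0)
			return TC_ACT_SHOT;
		return TC_ACT_OK;
	}

	char _license[] SEC("license") = "GPL";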
br_vlan_get_stats(v, &stats); vxi.rx_bytes = stats.rx_bytes; vxi.rx_packets = stats.rx_packets; @@ -1329,7 +1340,7 @@ static int bridge_fill_linkxstats(struct sk_buff *skb, BRIDGE_XSTATS_PAD); if (!nla) goto nla_put_failure; - br_multicast_get_stats(br, NULL, nla_data(nla)); + br_multicast_get_stats(br, p, nla_data(nla)); } #endif nla_nest_end(skb, nest); @@ -1344,52 +1355,6 @@ nla_put_failure: return -EMSGSIZE; } -static int brport_fill_linkxstats(struct sk_buff *skb, - const struct net_device *dev, - int *prividx) -{ - struct net_bridge_port *p = br_port_get_rtnl(dev); - struct nlattr *nla __maybe_unused; - struct nlattr *nest; - - if (!p) - return 0; - - nest = nla_nest_start(skb, LINK_XSTATS_TYPE_BRIDGE); - if (!nest) - return -EMSGSIZE; -#ifdef CONFIG_BRIDGE_IGMP_SNOOPING - nla = nla_reserve_64bit(skb, BRIDGE_XSTATS_MCAST, - sizeof(struct br_mcast_stats), - BRIDGE_XSTATS_PAD); - if (!nla) { - nla_nest_end(skb, nest); - return -EMSGSIZE; - } - br_multicast_get_stats(p->br, p, nla_data(nla)); -#endif - nla_nest_end(skb, nest); - - return 0; -} - -static int br_fill_linkxstats(struct sk_buff *skb, const struct net_device *dev, - int *prividx, int attr) -{ - int ret = -EINVAL; - - switch (attr) { - case IFLA_STATS_LINK_XSTATS: - ret = bridge_fill_linkxstats(skb, dev, prividx); - break; - case IFLA_STATS_LINK_XSTATS_SLAVE: - ret = brport_fill_linkxstats(skb, dev, prividx); - break; - } - - return ret; -} - static struct rtnl_af_ops br_af_ops __read_mostly = { .family = AF_BRIDGE, .get_link_af_size = br_get_link_af_size_filtered, diff --git a/net/core/filter.c b/net/core/filter.c index cb06aceb512a..a83766be1ad2 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -1350,14 +1350,18 @@ struct bpf_scratchpad { static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp); +static inline int __bpf_try_make_writable(struct sk_buff *skb, + unsigned int write_len) +{ + return skb_ensure_writable(skb, write_len); +} + static inline int bpf_try_make_writable(struct sk_buff *skb, unsigned int write_len) { - int err; + int err = __bpf_try_make_writable(skb, write_len); - err = skb_ensure_writable(skb, write_len); bpf_compute_data_end(skb); - return err; } @@ -1976,8 +1980,8 @@ static u64 bpf_skb_change_type(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) u32 pkt_type = r2; /* We only allow a restricted subset to be changed for now. */ - if (unlikely(skb->pkt_type > PACKET_OTHERHOST || - pkt_type > PACKET_OTHERHOST)) + if (unlikely(!skb_pkt_type_ok(skb->pkt_type) || + !skb_pkt_type_ok(pkt_type))) return -EINVAL; skb->pkt_type = pkt_type; @@ -1992,6 +1996,92 @@ static const struct bpf_func_proto bpf_skb_change_type_proto = { .arg2_type = ARG_ANYTHING, }; +static u32 __bpf_skb_min_len(const struct sk_buff *skb) +{ + u32 min_len = skb_network_offset(skb); + + if (skb_transport_header_was_set(skb)) + min_len = skb_transport_offset(skb); + if (skb->ip_summed == CHECKSUM_PARTIAL) + min_len = skb_checksum_start_offset(skb) + + skb->csum_offset + sizeof(__sum16); + return min_len; +} + +static u32 __bpf_skb_max_len(const struct sk_buff *skb) +{ + return skb->dev ? 
skb->dev->mtu + skb->dev->hard_header_len : + 65536; +} + +static int bpf_skb_grow_rcsum(struct sk_buff *skb, unsigned int new_len) +{ + unsigned int old_len = skb->len; + int ret; + + ret = __skb_grow_rcsum(skb, new_len); + if (!ret) + memset(skb->data + old_len, 0, new_len - old_len); + return ret; +} + +static int bpf_skb_trim_rcsum(struct sk_buff *skb, unsigned int new_len) +{ + return __skb_trim_rcsum(skb, new_len); +} + +static u64 bpf_skb_change_tail(u64 r1, u64 r2, u64 flags, u64 r4, u64 r5) +{ + struct sk_buff *skb = (struct sk_buff *)(long) r1; + u32 max_len = __bpf_skb_max_len(skb); + u32 min_len = __bpf_skb_min_len(skb); + u32 new_len = (u32) r2; + int ret; + + if (unlikely(flags || new_len > max_len || new_len < min_len)) + return -EINVAL; + if (skb->encapsulation) + return -ENOTSUPP; + + /* The basic idea of this helper is that it's performing the + * needed work to either grow or trim an skb, and eBPF program + * rewrites the rest via helpers like bpf_skb_store_bytes(), + * bpf_lX_csum_replace() and others rather than passing a raw + * buffer here. This one is a slow path helper and intended + * for replies with control messages. + * + * Like in bpf_skb_change_proto(), we want to keep this rather + * minimal and without protocol specifics so that we are able + * to separate concerns as in bpf_skb_store_bytes() should only + * be the one responsible for writing buffers. + * + * It's really expected to be a slow path operation here for + * control message replies, so we're implicitly linearizing, + * uncloning and drop offloads from the skb by this. + */ + ret = __bpf_try_make_writable(skb, skb->len); + if (!ret) { + if (new_len > skb->len) + ret = bpf_skb_grow_rcsum(skb, new_len); + else if (new_len < skb->len) + ret = bpf_skb_trim_rcsum(skb, new_len); + if (!ret && skb_is_gso(skb)) + skb_gso_reset(skb); + } + + bpf_compute_data_end(skb); + return ret; +} + +static const struct bpf_func_proto bpf_skb_change_tail_proto = { + .func = bpf_skb_change_tail, + .gpl_only = false, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_CTX, + .arg2_type = ARG_ANYTHING, + .arg3_type = ARG_ANYTHING, +}; + bool bpf_helper_changes_skb_data(void *func) { if (func == bpf_skb_vlan_push) @@ -2002,6 +2092,8 @@ bool bpf_helper_changes_skb_data(void *func) return true; if (func == bpf_skb_change_proto) return true; + if (func == bpf_skb_change_tail) + return true; if (func == bpf_l3_csum_replace) return true; if (func == bpf_l4_csum_replace) @@ -2282,7 +2374,6 @@ bpf_get_skb_set_tunnel_proto(enum bpf_func_id which) } } -#ifdef CONFIG_SOCK_CGROUP_DATA static u64 bpf_skb_under_cgroup(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) { struct sk_buff *skb = (struct sk_buff *)(long)r1; @@ -2303,7 +2394,7 @@ static u64 bpf_skb_under_cgroup(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) if (unlikely(!cgrp)) return -EAGAIN; - return cgroup_is_descendant(sock_cgroup_ptr(&sk->sk_cgrp_data), cgrp); + return sk_under_cgroup_hierarchy(sk, cgrp); } static const struct bpf_func_proto bpf_skb_under_cgroup_proto = { @@ -2314,7 +2405,41 @@ static const struct bpf_func_proto bpf_skb_under_cgroup_proto = { .arg2_type = ARG_CONST_MAP_PTR, .arg3_type = ARG_ANYTHING, }; -#endif + +static unsigned long bpf_xdp_copy(void *dst_buff, const void *src_buff, + unsigned long off, unsigned long len) +{ + memcpy(dst_buff, src_buff + off, len); + return 0; +} + +static u64 bpf_xdp_event_output(u64 r1, u64 r2, u64 flags, u64 r4, + u64 meta_size) +{ + struct xdp_buff *xdp = (struct xdp_buff *)(long) r1; + struct bpf_map *map = (struct bpf_map 
+	struct bpf_map *map = (struct bpf_map *)(long) r2;
+	u64 xdp_size = (flags & BPF_F_CTXLEN_MASK) >> 32;
+	void *meta = (void *)(long) r4;
+
+	if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
+		return -EINVAL;
+	if (unlikely(xdp_size > (unsigned long)(xdp->data_end - xdp->data)))
+		return -EFAULT;
+
+	return bpf_event_output(map, flags, meta, meta_size, xdp, xdp_size,
+				bpf_xdp_copy);
+}
+
+static const struct bpf_func_proto bpf_xdp_event_output_proto = {
+	.func		= bpf_xdp_event_output,
+	.gpl_only	= true,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_CTX,
+	.arg2_type	= ARG_CONST_MAP_PTR,
+	.arg3_type	= ARG_ANYTHING,
+	.arg4_type	= ARG_PTR_TO_STACK,
+	.arg5_type	= ARG_CONST_STACK_SIZE,
+};
 
 static const struct bpf_func_proto *
 sk_filter_func_proto(enum bpf_func_id func_id)
@@ -2368,6 +2493,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id)
 		return &bpf_skb_change_proto_proto;
 	case BPF_FUNC_skb_change_type:
 		return &bpf_skb_change_type_proto;
+	case BPF_FUNC_skb_change_tail:
+		return &bpf_skb_change_tail_proto;
 	case BPF_FUNC_skb_get_tunnel_key:
 		return &bpf_skb_get_tunnel_key_proto;
 	case BPF_FUNC_skb_set_tunnel_key:
@@ -2386,10 +2513,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id)
 		return &bpf_skb_event_output_proto;
 	case BPF_FUNC_get_smp_processor_id:
 		return &bpf_get_smp_processor_id_proto;
-#ifdef CONFIG_SOCK_CGROUP_DATA
 	case BPF_FUNC_skb_under_cgroup:
 		return &bpf_skb_under_cgroup_proto;
-#endif
 	default:
 		return sk_filter_func_proto(func_id);
 	}
@@ -2398,7 +2523,12 @@ tc_cls_act_func_proto(enum bpf_func_id func_id)
 static const struct bpf_func_proto *
 xdp_func_proto(enum bpf_func_id func_id)
 {
-	return sk_filter_func_proto(func_id);
+	switch (func_id) {
+	case BPF_FUNC_perf_event_output:
+		return &bpf_xdp_event_output_proto;
+	default:
+		return sk_filter_func_proto(func_id);
+	}
 }
 
 static bool __is_valid_access(int off, int size, enum bpf_access_type type)
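For context, a minimal tc-bpf usage sketch for the new bpf_skb_change_tail() helper. This is illustrative only and not part of the patch; the program name, the fixed header length and the bpf_skb_change_tail() declaration in bpf_helpers.h are assumptions here, following the samples/bpf conventions:

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include "bpf_helpers.h"

/* Shrink a reply skb down to bare Ethernet + IPv4 + TCP headers; the
 * program would then rewrite the headers via bpf_skb_store_bytes() and
 * friends, as the comment in bpf_skb_change_tail() above suggests.
 */
SEC("classifier")
int trim_to_headers(struct __sk_buff *skb)
{
	const __u32 hdr_len = 54;	/* 14 + 20 + 20, no options (assumed) */

	if (skb->len > hdr_len &&
	    bpf_skb_change_tail(skb, hdr_len, 0) < 0)
		return TC_ACT_SHOT;	/* resize refused, e.g. encapsulated skb */

	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";

Note how flags must be 0 and the new length has to stay within the helper's __bpf_skb_min_len()/__bpf_skb_max_len() bounds, otherwise it returns -EINVAL.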
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 91028ae2fb01..a2879c0f6c4c 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -118,13 +118,16 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
 	struct flow_dissector_key_addrs *key_addrs;
 	struct flow_dissector_key_ports *key_ports;
 	struct flow_dissector_key_tags *key_tags;
+	struct flow_dissector_key_vlan *key_vlan;
 	struct flow_dissector_key_keyid *key_keyid;
+	bool skip_vlan = false;
 	u8 ip_proto = 0;
 	bool ret = false;
 
 	if (!data) {
 		data = skb->data;
-		proto = skb->protocol;
+		proto = skb_vlan_tag_present(skb) ?
+			skb->vlan_proto : skb->protocol;
 		nhoff = skb_network_offset(skb);
 		hlen = skb_headlen(skb);
 	}
@@ -243,23 +246,45 @@ ipv6:
 	case htons(ETH_P_8021AD):
 	case htons(ETH_P_8021Q): {
 		const struct vlan_hdr *vlan;
-		struct vlan_hdr _vlan;
 
-		vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan), data, hlen, &_vlan);
-		if (!vlan)
-			goto out_bad;
+		if (skb_vlan_tag_present(skb))
+			proto = skb->protocol;
+
+		if (!skb_vlan_tag_present(skb) ||
+		    proto == cpu_to_be16(ETH_P_8021Q) ||
+		    proto == cpu_to_be16(ETH_P_8021AD)) {
+			struct vlan_hdr _vlan;
+
+			vlan = __skb_header_pointer(skb, nhoff, sizeof(_vlan),
+						    data, hlen, &_vlan);
+			if (!vlan)
+				goto out_bad;
+			proto = vlan->h_vlan_encapsulated_proto;
+			nhoff += sizeof(*vlan);
+			if (skip_vlan)
+				goto again;
+		}
 
+		skip_vlan = true;
 		if (dissector_uses_key(flow_dissector,
-				       FLOW_DISSECTOR_KEY_VLANID)) {
-			key_tags = skb_flow_dissector_target(flow_dissector,
-							     FLOW_DISSECTOR_KEY_VLANID,
+				       FLOW_DISSECTOR_KEY_VLAN)) {
+			key_vlan = skb_flow_dissector_target(flow_dissector,
+							     FLOW_DISSECTOR_KEY_VLAN,
 							     target_container);
 
-			key_tags->vlan_id = skb_vlan_tag_get_id(skb);
+			if (skb_vlan_tag_present(skb)) {
+				key_vlan->vlan_id = skb_vlan_tag_get_id(skb);
+				key_vlan->vlan_priority =
+					(skb_vlan_tag_get_prio(skb) >> VLAN_PRIO_SHIFT);
+			} else {
+				key_vlan->vlan_id = ntohs(vlan->h_vlan_TCI) &
+					VLAN_VID_MASK;
+				key_vlan->vlan_priority =
+					(ntohs(vlan->h_vlan_TCI) &
+					 VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+			}
 		}
 
-		proto = vlan->h_vlan_encapsulated_proto;
-		nhoff += sizeof(*vlan);
 		goto again;
 	}
 	case htons(ETH_P_PPP_SES): {
@@ -917,8 +942,8 @@ static const struct flow_dissector_key flow_keys_dissector_keys[] = {
 		.offset = offsetof(struct flow_keys, ports),
 	},
 	{
-		.key_id = FLOW_DISSECTOR_KEY_VLANID,
-		.offset = offsetof(struct flow_keys, tags),
+		.key_id = FLOW_DISSECTOR_KEY_VLAN,
+		.offset = offsetof(struct flow_keys, vlan),
 	},
 	{
 		.key_id = FLOW_DISSECTOR_KEY_FLOW_LABEL,
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 7e68bc6bc853..8bda74e595a5 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -543,7 +543,7 @@ static void dsa_switch_destroy(struct dsa_switch *ds)
 }
 
 #ifdef CONFIG_PM_SLEEP
-static int dsa_switch_suspend(struct dsa_switch *ds)
+int dsa_switch_suspend(struct dsa_switch *ds)
 {
 	int i, ret = 0;
 
@@ -562,8 +562,9 @@ static int dsa_switch_suspend(struct dsa_switch *ds)
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(dsa_switch_suspend);
 
-static int dsa_switch_resume(struct dsa_switch *ds)
+int dsa_switch_resume(struct dsa_switch *ds)
 {
 	int i, ret = 0;
 
@@ -585,6 +586,7 @@ static int dsa_switch_resume(struct dsa_switch *ds)
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(dsa_switch_resume);
 #endif
 
 /* platform driver init and cleanup *****************************************/
@@ -1086,7 +1088,6 @@ static int dsa_resume(struct device *d)
 static SIMPLE_DEV_PM_OPS(dsa_pm_ops, dsa_suspend, dsa_resume);
 
 static const struct of_device_id dsa_of_match_table[] = {
-	{ .compatible = "brcm,bcm7445-switch-v4.0" },
 	{ .compatible = "marvell,dsa", },
 	{}
 };
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 539fa264e67d..8066ccc48a17 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -1576,7 +1576,8 @@ static bool fib_good_nh(const struct fib_nh *nh)
 
 		rcu_read_lock_bh();
 
-		n = __ipv4_neigh_lookup_noref(nh->nh_dev, nh->nh_gw);
+		n = __ipv4_neigh_lookup_noref(nh->nh_dev,
+					      (__force u32)nh->nh_gw);
 		if (n)
 			state = n->nud_state;
 
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 032a96d78c99..f1a9a0a8a1f3 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -3092,23 +3092,6 @@ struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
 }
 EXPORT_SYMBOL(tcp_get_md5sig_pool);
 
-int tcp_md5_hash_header(struct tcp_md5sig_pool *hp,
-			const struct tcphdr *th)
-{
-	struct scatterlist sg;
-	struct tcphdr hdr;
-
-	/* We are not allowed to change tcphdr, make a local copy */
-	memcpy(&hdr, th, sizeof(hdr));
-	hdr.check = 0;
-
-	/* options aren't included in the hash */
-	sg_init_one(&sg, &hdr, sizeof(hdr));
-	ahash_request_set_crypt(hp->md5_req, &sg, NULL, sizeof(hdr));
-	return crypto_ahash_update(hp->md5_req);
-}
-EXPORT_SYMBOL(tcp_md5_hash_header);
-
 int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
 			  const struct sk_buff *skb, unsigned int header_len)
 {
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3ebf45b38bc3..8cd02c0b056c 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4392,12 +4392,9 @@ static int tcp_try_rmem_schedule(struct sock *sk, struct sk_buff *skb,
 		if (tcp_prune_queue(sk) < 0)
 			return -1;
 
-		if (!sk_rmem_schedule(sk, skb, size)) {
+		while (!sk_rmem_schedule(sk, skb, size)) {
 			if (!tcp_prune_ofo_queue(sk))
 				return -1;
-
-			if (!sk_rmem_schedule(sk, skb, size))
-				return -1;
 		}
 	}
 	return 0;
@@ -4874,29 +4871,41 @@ static void tcp_collapse_ofo_queue(struct sock *sk)
 }
 
 /*
- * Purge the out-of-order queue.
- * Return true if queue was pruned.
+ * Clean the out-of-order queue to make room.
+ * We drop packets with the highest sequence numbers in order to:
+ * 1) give holes a chance to be filled,
+ * 2) avoid adding large latencies when thousands of packets sit there.
+ *    (But if the application shrinks SO_RCVBUF, we could still end up
+ *    freeing the whole queue here.)
+ *
+ * Return true if the queue has shrunk.
  */
 static bool tcp_prune_ofo_queue(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	bool res = false;
+	struct sk_buff *skb;
 
-	if (!skb_queue_empty(&tp->out_of_order_queue)) {
-		NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED);
-		__skb_queue_purge(&tp->out_of_order_queue);
+	if (skb_queue_empty(&tp->out_of_order_queue))
+		return false;
 
-		/* Reset SACK state.  A conforming SACK implementation will
-		 * do the same at a timeout based retransmit.  When a connection
-		 * is in a sad state like this, we care only about integrity
-		 * of the connection not performance.
-		 */
-		if (tp->rx_opt.sack_ok)
-			tcp_sack_reset(&tp->rx_opt);
+	NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED);
+
+	while ((skb = __skb_dequeue_tail(&tp->out_of_order_queue)) != NULL) {
+		tcp_drop(sk, skb);
 		sk_mem_reclaim(sk);
-		res = true;
+		if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
+		    !tcp_under_memory_pressure(sk))
+			break;
 	}
-	return res;
+
+	/* Reset SACK state.  A conforming SACK implementation will
+	 * do the same at a timeout based retransmit.  When a connection
+	 * is in a sad state like this, we care only about integrity
+	 * of the connection, not performance.
+	 */
+	if (tp->rx_opt.sack_ok)
+		tcp_sack_reset(&tp->rx_opt);
+	return true;
 }
 
 /* Reduce allocated memory if we can, trying to get
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index bdaef7fd6e47..8b45794eb6b2 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2776,7 +2776,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
 	max_segs = tcp_tso_autosize(sk, tcp_current_mss(sk));
 
 	tcp_for_write_queue_from(skb, sk) {
-		__u8 sacked = TCP_SKB_CB(skb)->sacked;
+		__u8 sacked;
 		int segs;
 
 		if (skb == tcp_send_head(sk))
@@ -2788,6 +2788,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
 		segs = tp->snd_cwnd - tcp_packets_in_flight(tp);
 		if (segs <= 0)
 			return;
+		sacked = TCP_SKB_CB(skb)->sacked;
 
 		/* In case tcp_shift_skb_data() has aggregated large skbs,
 		 * we need to make sure we are not sending too large TSO packets
 		 */
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index e61f7cd65d08..8f5f7f6026f7 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -114,6 +114,7 @@
 #include <net/busy_poll.h>
 #include "udp_impl.h"
 #include <net/sock_reuseport.h>
+#include <net/addrconf.h>
 
 struct udp_table udp_table __read_mostly;
 EXPORT_SYMBOL(udp_table);
diff --git a/net/netlink/diag.c b/net/netlink/diag.c
index 8dd836a8dd60..3e3e2534478a 100644
--- a/net/netlink/diag.c
+++ b/net/netlink/diag.c
@@ -63,43 +63,75 @@ out_nlmsg_trim:
 static int __netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
 			       int protocol, int s_num)
 {
+	struct rhashtable_iter *hti = (void *)cb->args[2];
 	struct netlink_table *tbl = &nl_table[protocol];
-	struct rhashtable *ht = &tbl->hash;
-	const struct bucket_table *htbl = rht_dereference_rcu(ht->tbl, ht);
 	struct net *net = sock_net(skb->sk);
 	struct netlink_diag_req *req;
 	struct netlink_sock *nlsk;
 	struct sock *sk;
-	int ret = 0, num = 0, i;
+	int num = 2;
+	int ret = 0;
 
 	req = nlmsg_data(cb->nlh);
 
-	for (i = 0; i < htbl->size; i++) {
-		struct rhash_head *pos;
+	if (s_num > 1)
+		goto mc_list;
 
-		rht_for_each_entry_rcu(nlsk, pos, htbl, i, node) {
-			sk = (struct sock *)nlsk;
+	num--;
 
-			if (!net_eq(sock_net(sk), net))
-				continue;
-			if (num < s_num) {
-				num++;
+	if (!hti) {
+		hti = kmalloc(sizeof(*hti), GFP_KERNEL);
+		if (!hti)
+			return -ENOMEM;
+
+		cb->args[2] = (long)hti;
+	}
+
+	if (!s_num)
+		rhashtable_walk_enter(&tbl->hash, hti);
+
+	ret = rhashtable_walk_start(hti);
+	if (ret == -EAGAIN)
+		ret = 0;
+	if (ret)
+		goto stop;
+
+	while ((nlsk = rhashtable_walk_next(hti))) {
+		if (IS_ERR(nlsk)) {
+			ret = PTR_ERR(nlsk);
+			if (ret == -EAGAIN) {
+				ret = 0;
 				continue;
 			}
+			break;
+		}
 
-			if (sk_diag_fill(sk, skb, req,
-					 NETLINK_CB(cb->skb).portid,
-					 cb->nlh->nlmsg_seq,
-					 NLM_F_MULTI,
-					 sock_i_ino(sk)) < 0) {
-				ret = 1;
-				goto done;
-			}
+		sk = (struct sock *)nlsk;
 
-			num++;
+		if (!net_eq(sock_net(sk), net))
+			continue;
+
+		if (sk_diag_fill(sk, skb, req,
+				 NETLINK_CB(cb->skb).portid,
+				 cb->nlh->nlmsg_seq,
+				 NLM_F_MULTI,
+				 sock_i_ino(sk)) < 0) {
+			ret = 1;
+			break;
 		}
 	}
 
+stop:
+	rhashtable_walk_stop(hti);
+	if (ret)
+		goto done;
+
+	rhashtable_walk_exit(hti);
+	cb->args[2] = 0;
+	num++;
+
+mc_list:
+	read_lock(&nl_table_lock);
 	sk_for_each_bound(sk, &tbl->mc_list) {
 		if (sk_hashed(sk))
 			continue;
@@ -116,13 +148,14 @@ static int __netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
 				 NLM_F_MULTI,
 				 sock_i_ino(sk)) < 0) {
 			ret = 1;
-			goto done;
+			break;
 		}
 		num++;
 	}
+	read_unlock(&nl_table_lock);
+
 done:
 	cb->args[0] = num;
-	cb->args[1] = protocol;
 
 	return ret;
 }
@@ -131,20 +164,20 @@ static int netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
 {
 	struct netlink_diag_req *req;
 	int s_num = cb->args[0];
+	int err = 0;
 
 	req = nlmsg_data(cb->nlh);
 
-	rcu_read_lock();
-	read_lock(&nl_table_lock);
-
 	if (req->sdiag_protocol == NDIAG_PROTO_ALL) {
 		int i;
 
 		for (i = cb->args[1]; i < MAX_LINKS; i++) {
-			if (__netlink_diag_dump(skb, cb, i, s_num))
+			err = __netlink_diag_dump(skb, cb, i, s_num);
+			if (err)
 				break;
 			s_num = 0;
 		}
+		cb->args[1] = i;
 	} else {
 		if (req->sdiag_protocol >= MAX_LINKS) {
 			read_unlock(&nl_table_lock);
@@ -152,13 +185,22 @@ static int netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
 			return -ENOENT;
 		}
 
-		__netlink_diag_dump(skb, cb, req->sdiag_protocol, s_num);
+		err = __netlink_diag_dump(skb, cb, req->sdiag_protocol, s_num);
 	}
 
-	read_unlock(&nl_table_lock);
-	rcu_read_unlock();
+	return err < 0 ? err : skb->len;
+}
+
+static int netlink_diag_dump_done(struct netlink_callback *cb)
+{
+	struct rhashtable_iter *hti = (void *)cb->args[2];
+
+	if (cb->args[0] == 1)
+		rhashtable_walk_exit(hti);
 
-	return skb->len;
+	kfree(hti);
+
+	return 0;
 }
 
 static int netlink_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
@@ -172,6 +214,7 @@ static int netlink_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
 	if (h->nlmsg_flags & NLM_F_DUMP) {
 		struct netlink_dump_control c = {
 			.dump = netlink_diag_dump,
+			.done = netlink_diag_dump_done,
 		};
 		return netlink_dump_start(net->diag_nlsk, skb, h, &c);
 	} else
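The diag dump above follows the standard rhashtable walker life cycle: enter, start, next until NULL, stop, exit. A condensed sketch of that pattern, mirroring the exact calls used in __netlink_diag_dump() and netlink_diag_dump_done() (struct my_obj and the table layout are assumptions for illustration):

#include <linux/rhashtable.h>
#include <linux/err.h>

struct my_obj {
	struct rhash_head node;	/* hypothetical hashed object */
};

static int walk_all_objects(struct rhashtable *ht)
{
	struct rhashtable_iter iter;
	struct my_obj *obj;
	int err;

	rhashtable_walk_enter(ht, &iter);	/* pairs with _exit() */

	err = rhashtable_walk_start(&iter);	/* takes the RCU read lock */
	if (err == -EAGAIN)
		err = 0;			/* -EAGAIN here means "resized, go on" */
	if (err)
		goto out;

	while ((obj = rhashtable_walk_next(&iter)) != NULL) {
		if (IS_ERR(obj)) {
			if (PTR_ERR(obj) == -EAGAIN)
				continue;	/* table resized under us, retry */
			err = PTR_ERR(obj);
			break;
		}
		/* ... emit one dump record for obj ... */
	}

	rhashtable_walk_stop(&iter);		/* drops the RCU read lock */
out:
	rhashtable_walk_exit(&iter);
	return err;
}

The iterator is safe against concurrent inserts, removals and table resizes, which is precisely why the diag code can drop the old nl_table_lock/RCU bracketing around the whole dump.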
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index 691409de3e1a..59a8d3150ae2 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -43,7 +43,8 @@ static int tcf_vlan(struct sk_buff *skb, const struct tc_action *a,
 			goto drop;
 		break;
 	case TCA_VLAN_ACT_PUSH:
-		err = skb_vlan_push(skb, v->tcfv_push_proto, v->tcfv_push_vid);
+		err = skb_vlan_push(skb, v->tcfv_push_proto, v->tcfv_push_vid |
+				    (v->tcfv_push_prio << VLAN_PRIO_SHIFT));
 		if (err)
 			goto drop;
 		break;
@@ -65,6 +66,7 @@ static const struct nla_policy vlan_policy[TCA_VLAN_MAX + 1] = {
 	[TCA_VLAN_PARMS] = { .len = sizeof(struct tc_vlan) },
 	[TCA_VLAN_PUSH_VLAN_ID] = { .type = NLA_U16 },
 	[TCA_VLAN_PUSH_VLAN_PROTOCOL] = { .type = NLA_U16 },
+	[TCA_VLAN_PUSH_VLAN_PRIORITY] = { .type = NLA_U8 },
 };
 
 static int tcf_vlan_init(struct net *net, struct nlattr *nla,
@@ -78,6 +80,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
 	int action;
 	__be16 push_vid = 0;
 	__be16 push_proto = 0;
+	u8 push_prio = 0;
 	bool exists = false;
 	int ret = 0, err;
 
@@ -123,6 +126,9 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
 		} else {
 			push_proto = htons(ETH_P_8021Q);
 		}
+
+		if (tb[TCA_VLAN_PUSH_VLAN_PRIORITY])
+			push_prio = nla_get_u8(tb[TCA_VLAN_PUSH_VLAN_PRIORITY]);
 		break;
 	default:
 		if (exists)
@@ -150,6 +156,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
 
 	v->tcfv_action = action;
 	v->tcfv_push_vid = push_vid;
+	v->tcfv_push_prio = push_prio;
 	v->tcfv_push_proto = push_proto;
 
 	v->tcf_action = parm->action;
@@ -181,7 +188,9 @@ static int tcf_vlan_dump(struct sk_buff *skb, struct tc_action *a,
 	if (v->tcfv_action == TCA_VLAN_ACT_PUSH &&
 	    (nla_put_u16(skb, TCA_VLAN_PUSH_VLAN_ID, v->tcfv_push_vid) ||
 	     nla_put_be16(skb, TCA_VLAN_PUSH_VLAN_PROTOCOL,
-			  v->tcfv_push_proto)))
+			  v->tcfv_push_proto) ||
+	     (nla_put_u8(skb, TCA_VLAN_PUSH_VLAN_PRIORITY,
+			 v->tcfv_push_prio))))
 		goto nla_put_failure;
 
 	tcf_tm_dump(&t, &v->tcf_tm);
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 5060801a2f6d..1e11e57e6947 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -28,6 +28,7 @@ struct fl_flow_key {
 	struct flow_dissector_key_control control;
 	struct flow_dissector_key_basic basic;
 	struct flow_dissector_key_eth_addrs eth;
+	struct flow_dissector_key_vlan vlan;
 	struct flow_dissector_key_addrs ipaddrs;
 	union {
 		struct flow_dissector_key_ipv4_addrs ipv4;
@@ -293,6 +294,10 @@ static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
 	[TCA_FLOWER_KEY_TCP_DST] = { .type = NLA_U16 },
 	[TCA_FLOWER_KEY_UDP_SRC] = { .type = NLA_U16 },
 	[TCA_FLOWER_KEY_UDP_DST] = { .type = NLA_U16 },
+	[TCA_FLOWER_KEY_VLAN_ID] = { .type = NLA_U16 },
+	[TCA_FLOWER_KEY_VLAN_PRIO] = { .type = NLA_U8 },
+	[TCA_FLOWER_KEY_VLAN_ETH_TYPE] = { .type = NLA_U16 },
 };
 
 static void fl_set_key_val(struct nlattr **tb,
@@ -308,9 +313,29 @@ static void fl_set_key_val(struct nlattr **tb,
 		memcpy(mask, nla_data(tb[mask_type]), len);
 }
 
+static void fl_set_key_vlan(struct nlattr **tb,
+			    struct flow_dissector_key_vlan *key_val,
+			    struct flow_dissector_key_vlan *key_mask)
+{
+#define VLAN_PRIORITY_MASK	0x7
+
+	if (tb[TCA_FLOWER_KEY_VLAN_ID]) {
+		key_val->vlan_id =
+			nla_get_u16(tb[TCA_FLOWER_KEY_VLAN_ID]) & VLAN_VID_MASK;
+		key_mask->vlan_id = VLAN_VID_MASK;
+	}
+	if (tb[TCA_FLOWER_KEY_VLAN_PRIO]) {
+		key_val->vlan_priority =
+			nla_get_u8(tb[TCA_FLOWER_KEY_VLAN_PRIO]) &
+			VLAN_PRIORITY_MASK;
+		key_mask->vlan_priority = VLAN_PRIORITY_MASK;
+	}
+}
+
 static int fl_set_key(struct net *net, struct nlattr **tb,
 		      struct fl_flow_key *key, struct fl_flow_key *mask)
 {
+	__be16 ethertype;
 #ifdef CONFIG_NET_CLS_IND
 	if (tb[TCA_FLOWER_INDEV]) {
 		int err = tcf_change_indev(net, tb[TCA_FLOWER_INDEV]);
@@ -328,9 +353,19 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
 		       mask->eth.src, TCA_FLOWER_KEY_ETH_SRC_MASK,
 		       sizeof(key->eth.src));
 
-	fl_set_key_val(tb, &key->basic.n_proto, TCA_FLOWER_KEY_ETH_TYPE,
-		       &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
-		       sizeof(key->basic.n_proto));
+	if (tb[TCA_FLOWER_KEY_ETH_TYPE]) {
+		ethertype = nla_get_be16(tb[TCA_FLOWER_KEY_ETH_TYPE]);
+
+		if (ethertype == htons(ETH_P_8021Q)) {
+			fl_set_key_vlan(tb, &key->vlan, &mask->vlan);
+			fl_set_key_val(tb, &key->basic.n_proto,
+				       TCA_FLOWER_KEY_VLAN_ETH_TYPE,
+				       &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
+				       sizeof(key->basic.n_proto));
+		} else {
+			key->basic.n_proto = ethertype;
+			mask->basic.n_proto = cpu_to_be16(~0);
+		}
+	}
 
 	if (key->basic.n_proto == htons(ETH_P_IP) ||
 	    key->basic.n_proto == htons(ETH_P_IPV6)) {
@@ -404,12 +439,10 @@ static int fl_init_hashtable(struct cls_fl_head *head,
 
 #define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member)
 #define FL_KEY_MEMBER_SIZE(member) (sizeof(((struct fl_flow_key *) 0)->member))
-#define FL_KEY_MEMBER_END_OFFSET(member)				\
-	(FL_KEY_MEMBER_OFFSET(member) + FL_KEY_MEMBER_SIZE(member))
 
-#define FL_KEY_IN_RANGE(mask, member)					\
-	(FL_KEY_MEMBER_OFFSET(member) <= (mask)->range.end &&		\
-	 FL_KEY_MEMBER_END_OFFSET(member) >= (mask)->range.start)
+#define FL_KEY_IS_MASKED(mask, member)					\
+	memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member),	\
+		   0, FL_KEY_MEMBER_SIZE(member))
 
 #define FL_KEY_SET(keys, cnt, id, member)				\
 	do {								\
@@ -418,9 +451,9 @@ static int fl_init_hashtable(struct cls_fl_head *head,
 		cnt++;							\
 	} while(0);
 
-#define FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt, id, member)		\
+#define FL_KEY_SET_IF_MASKED(mask, keys, cnt, id, member)		\
 	do {								\
-		if (FL_KEY_IN_RANGE(mask, member))			\
+		if (FL_KEY_IS_MASKED(mask, member))			\
 			FL_KEY_SET(keys, cnt, id, member);		\
 	} while(0);
 
@@ -432,14 +465,16 @@ static void fl_init_dissector(struct cls_fl_head *head,
 
 	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_CONTROL, control);
 	FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_BASIC, basic);
-	FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
-			       FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
-	FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
-			       FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
-	FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
-			       FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
-	FL_KEY_SET_IF_IN_RANGE(mask, keys, cnt,
-			       FLOW_DISSECTOR_KEY_PORTS, tp);
+	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
+			     FLOW_DISSECTOR_KEY_ETH_ADDRS, eth);
+	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
+			     FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4);
+	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
+			     FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6);
+	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
+			     FLOW_DISSECTOR_KEY_PORTS, tp);
+	FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
+			     FLOW_DISSECTOR_KEY_VLAN, vlan);
 
 	skb_flow_dissector_init(&head->dissector, keys, cnt);
 }
@@ -668,6 +703,29 @@ static int fl_dump_key_val(struct sk_buff *skb,
 	return 0;
 }
 
+static int fl_dump_key_vlan(struct sk_buff *skb,
+			    struct flow_dissector_key_vlan *vlan_key,
+			    struct flow_dissector_key_vlan *vlan_mask)
+{
+	int err;
+
+	if (!memchr_inv(vlan_mask, 0, sizeof(*vlan_mask)))
+		return 0;
+	if (vlan_mask->vlan_id) {
+		err = nla_put_u16(skb, TCA_FLOWER_KEY_VLAN_ID,
+				  vlan_key->vlan_id);
+		if (err)
+			return err;
+	}
+	if (vlan_mask->vlan_priority) {
+		err = nla_put_u8(skb, TCA_FLOWER_KEY_VLAN_PRIO,
+				 vlan_key->vlan_priority);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+
 static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
 		   struct sk_buff *skb, struct tcmsg *t)
 {
@@ -712,6 +770,10 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
 			    &mask->basic.n_proto, TCA_FLOWER_UNSPEC,
 			    sizeof(key->basic.n_proto)))
 		goto nla_put_failure;
+
+	if (fl_dump_key_vlan(skb, &key->vlan, &mask->vlan))
+		goto nla_put_failure;
+
 	if ((key->basic.n_proto == htons(ETH_P_IP) ||
 	     key->basic.n_proto == htons(ETH_P_IPV6)) &&
 	    fl_dump_key_val(skb, &key->basic.ip_proto, TCA_FLOWER_KEY_IP_PROTO,
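The FL_KEY_IS_MASKED() change above replaces the old offset-range test with a byte scan of the mask itself: a key member takes part in dissection iff its mask contains any non-zero byte, which is what memchr_inv(p, 0, len) reports in the kernel. A small userspace model of the same check (illustrative only; the struct and field names are made up):

#include <stddef.h>
#include <stdio.h>

struct toy_key {			/* stand-in for struct fl_flow_key */
	unsigned char eth[12];
	unsigned char vlan[4];
};

/* Userspace analogue of memchr_inv(member, 0, len):
 * nonzero when any byte of the member's mask is set.
 */
static int member_is_masked(const struct toy_key *mask, size_t off, size_t len)
{
	const unsigned char *p = (const unsigned char *)mask + off;
	size_t i;

	for (i = 0; i < len; i++)
		if (p[i])
			return 1;
	return 0;
}

int main(void)
{
	struct toy_key mask = { { 0 }, { 0 } };

	mask.vlan[1] = 0x0f;		/* user configured a VLAN match */

	printf("eth masked:  %d\n", member_is_masked(&mask,
	       offsetof(struct toy_key, eth), sizeof(mask.eth)));
	printf("vlan masked: %d\n", member_is_masked(&mask,
	       offsetof(struct toy_key, vlan), sizeof(mask.vlan)));
	return 0;
}

The upshot is that only fields the user actually masked are handed to skb_flow_dissector_init(), so unmasked members no longer drag neighboring fields into the dissection just because they happen to sit in the same offset range.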
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 25aada7b095c..d677b3484d81 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -260,6 +260,9 @@ static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
 {
 	struct Qdisc *q;
 
+	if (!qdisc_dev(root))
+		return (root->handle == handle ? root : NULL);
+
 	if (!(root->flags & TCQ_F_BUILTIN) &&
 	    root->handle == handle)
 		return root;
@@ -1432,7 +1435,7 @@ err_out:
 
 static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
 			      struct netlink_callback *cb,
-			      int *q_idx_p, int s_q_idx)
+			      int *q_idx_p, int s_q_idx, bool recur)
 {
 	int ret = 0, q_idx = *q_idx_p;
 	struct Qdisc *q;
@@ -1451,6 +1454,16 @@ static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
 			goto done;
 		q_idx++;
 	}
+
+	/* If dumping singletons, there is no qdisc_dev(root) and the
+	 * singleton itself has already been dumped.
+	 *
+	 * If we've already dumped the top-level (ingress) qdisc above and
+	 * the global qdisc hashtable, we don't want to hit it again.
+	 */
+	if (!qdisc_dev(root) || !recur)
+		goto out;
+
 	hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
 		if (q_idx < s_q_idx) {
 			q_idx++;
@@ -1492,13 +1505,13 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
 			s_q_idx = 0;
 		q_idx = 0;
 
-		if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx) < 0)
+		if (tc_dump_qdisc_root(dev->qdisc, skb, cb, &q_idx, s_q_idx, true) < 0)
 			goto done;
 
 		dev_queue = dev_ingress_queue(dev);
 		if (dev_queue &&
 		    tc_dump_qdisc_root(dev_queue->qdisc_sleeping, skb, cb,
-				       &q_idx, s_q_idx) < 0)
+				       &q_idx, s_q_idx, false) < 0)
 			goto done;
 
 cont:
@@ -1775,6 +1788,9 @@ static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
 	if (tc_dump_tclass_qdisc(root, skb, tcm, cb, t_p, s_t) < 0)
 		return -1;
 
+	if (!qdisc_dev(root))
+		return 0;
+
 	hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
 		if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
 			return -1;
diff --git a/net/tipc/net.h b/net/tipc/net.h
index 77a7a118911d..c7c254902873 100644
--- a/net/tipc/net.h
+++ b/net/tipc/net.h
@@ -39,6 +39,8 @@
 
 #include <net/genetlink.h>
 
+extern const struct nla_policy tipc_nl_net_policy[];
+
 int tipc_net_start(struct net *net, u32 addr);
 void tipc_net_stop(struct net *net);
 
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
index a84daec0afe9..2718de667828 100644
--- a/net/tipc/netlink.c
+++ b/net/tipc/netlink.c
@@ -238,6 +238,11 @@ static const struct genl_ops tipc_genl_v2_ops[] = {
 		.dumpit	= tipc_nl_node_dump_monitor_peer,
 		.policy	= tipc_nl_policy,
 	},
+	{
+		.cmd	= TIPC_NL_PEER_REMOVE,
+		.doit	= tipc_nl_peer_rm,
+		.policy	= tipc_nl_policy,
+	}
 };
 
 int tipc_nlmsg_parse(const struct nlmsghdr *nlh, struct nlattr ***attr)
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 21974191e425..7e8b75fd1a02 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -1553,6 +1553,69 @@ discard:
 	kfree_skb(skb);
 }
 
+int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info)
+{
+	struct net *net = sock_net(skb->sk);
+	struct tipc_net *tn = net_generic(net, tipc_net_id);
+	struct nlattr *attrs[TIPC_NLA_NET_MAX + 1];
+	struct tipc_node *peer;
+	u32 addr;
+	int err;
+	int i;
+
+	/* We identify the peer by its net */
+	if (!info->attrs[TIPC_NLA_NET])
+		return -EINVAL;
+
+	err = nla_parse_nested(attrs, TIPC_NLA_NET_MAX,
+			       info->attrs[TIPC_NLA_NET],
+			       tipc_nl_net_policy);
+	if (err)
+		return err;
+
+	if (!attrs[TIPC_NLA_NET_ADDR])
+		return -EINVAL;
+
+	addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]);
+
+	if (in_own_node(net, addr))
+		return -ENOTSUPP;
+
+	spin_lock_bh(&tn->node_list_lock);
+	peer = tipc_node_find(net, addr);
+	if (!peer) {
+		spin_unlock_bh(&tn->node_list_lock);
+		return -ENXIO;
+	}
+
+	tipc_node_write_lock(peer);
+	if (peer->state != SELF_DOWN_PEER_DOWN &&
+	    peer->state != SELF_DOWN_PEER_LEAVING) {
+		tipc_node_write_unlock(peer);
+		err = -EBUSY;
+		goto err_out;
+	}
+
+	for (i = 0; i < MAX_BEARERS; i++) {
+		struct tipc_link_entry *le = &peer->links[i];
+
+		if (le->link) {
+			kfree(le->link);
+			le->link = NULL;
+			peer->link_cnt--;
+		}
+	}
+	tipc_node_write_unlock(peer);
+	tipc_node_delete(peer);
+
+	err = 0;
+err_out:
+	tipc_node_put(peer);
+	spin_unlock_bh(&tn->node_list_lock);
+
+	return err;
+}
+
 int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
 {
 	int err;
diff --git a/net/tipc/node.h b/net/tipc/node.h
index d69fdfcc0ec9..4578b34c7dca 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -77,6 +77,7 @@ int tipc_nl_node_dump_link(struct sk_buff *skb, struct netlink_callback *cb);
 int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info);
 int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info);
 int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info);
+int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info);
 
 int tipc_nl_node_set_monitor(struct sk_buff *skb, struct genl_info *info);
 int tipc_nl_node_get_monitor(struct sk_buff *skb, struct genl_info *info);
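A hedged userspace sketch of driving the new TIPC_NL_PEER_REMOVE command with libnl-genl. This is illustrative only and not part of the patch; it assumes the "TIPCv2" generic netlink family name and the TIPC_NLA_NET/TIPC_NLA_NET_ADDR attributes parsed by tipc_nl_peer_rm() above, with error handling elided:

#include <linux/types.h>
#include <linux/tipc_netlink.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>

/* Ask the kernel to forget an off-line TIPC peer node. */
static int tipc_peer_remove(__u32 peer_addr)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg = nlmsg_alloc();
	struct nlattr *net_attrs;
	int family, err;

	genl_connect(sk);
	family = genl_ctrl_resolve(sk, "TIPCv2");

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    TIPC_NL_PEER_REMOVE, 1);
	net_attrs = nla_nest_start(msg, TIPC_NLA_NET);
	nla_put_u32(msg, TIPC_NLA_NET_ADDR, peer_addr);
	nla_nest_end(msg, net_attrs);

	/* A NACK (e.g. -EBUSY while links are still up, -ENXIO for an
	 * unknown node) comes back on the socket as a netlink error.
	 */
	err = nl_send_auto(sk, msg);

	nlmsg_free(msg);
	nl_socket_free(sk);
	return err < 0 ? err : 0;
}

Per the handler above, the kernel refuses to remove the local node (-ENOTSUPP) and any peer whose state is not SELF_DOWN_PEER_DOWN or SELF_DOWN_PEER_LEAVING, so this is only useful for cleaning up nodes that have already left the cluster.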