author		Martin K. Petersen <martin.petersen@oracle.com>	2025-01-14 11:35:50 -0500
committer	Martin K. Petersen <martin.petersen@oracle.com>	2025-01-14 11:35:50 -0500
commit		7d6f88e76e28ac44ed003dcf80881ea6b202ec08 (patch)
tree		b625c43844cf6216efde6cf305ebc309eb3b4993
parent		295006f6e8c17212d3098811166e29627d19e05c (diff)
parent		8697934682f1873b7b1cb9cc61b81edf042c9272 (diff)
Merge patch series "Introduce support for Fabric Discovery and Login Services"
Karan Tilak Kumar <kartilak@cisco.com> says:
Hi Martin, reviewers,
This cover letter describes the feature: add support for Fabric
Discovery and Login Services (FDLS) to the fnic driver.
This functionality is needed to support port channel RSCN (PC-RSCN)
handling, and it serves as a base for FC-NVME initiator support and
eCPU handling (both planned later).
FDLS discovers the fabric and the target ports associated with it,
and then logs in to the target ports that are zoned to the initiator.
The driver uses the tport structure presented by FDLS.
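Below is a minimal, self-contained sketch of the kind of tport lookup
FDLS provides, modeled on fnic_find_tport_by_fcid() in the diff below.
The types here are simplified stand-ins for illustration, not the
driver's definitions:

	#include <stddef.h>
	#include <stdint.h>

	#define TPORT_TERMINATING 0x1	/* stand-in for FNIC_FDLS_TPORT_TERMINATING */

	struct tport {
		uint32_t fcid;		/* FC_ID the fabric assigned to the target */
		uint32_t flags;
		struct tport *next;	/* stand-in for the kernel list linkage */
	};

	/* Return the live tport matching fcid, skipping ports being torn down */
	static struct tport *find_tport_by_fcid(struct tport *head, uint32_t fcid)
	{
		struct tport *tp;

		for (tp = head; tp != NULL; tp = tp->next) {
			if (tp->fcid == fcid && !(tp->flags & TPORT_TERMINATING))
				return tp;
		}
		return NULL;
	}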
Port channel RSCN is a Cisco vendor-specific RSCN event that is
applicable only to Cisco UCS fabrics.
In cases where the eCPU in the UCS VIC (Unified Computing System
Virtual Interface Card) hangs, a logout is sent to the fabric. Upon
successful logout from the fabric, the IO path is failed over to a
new path.
From a feature perspective, the series first adds support for the
core FDLS functionality. Code is then added to modify the IO path and
the driver interfaces. Finally, support for port channel RSCN
handling is added.
Here are the headers of some of the salient patches (a sketch of the
fabric bring-up sequence they implement follows the list):
o add headers and definitions for FDLS
o add support for fabric based solicited requests and responses
o add support for target based solicited requests and responses
o add support for unsolicited requests and responses
o add support for FDMI
o add support for FIP
o add functionality in fnic to support FDLS
o modify IO path to use FDLS and tport
o modify fnic interfaces to use FDLS
o add support to handle port channel RSCN
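For orientation, the fabric bring-up that these patches implement
walks through a fixed sequence of solicited exchanges. The enum below
is an illustrative condensation, not the driver's definition: the
state names match the FDLS_STATE_* values in the diff and the d_ids
match the frames the driver builds, while the exact step-to-step
ordering is driven by the driver's response handlers.

	/* Illustrative condensation of FDLS fabric bring-up (not driver code) */
	enum fdls_fabric_bringup_sketch {
		FDLS_STATE_FABRIC_FLOGI,	/* FLOGI to the FCF, d_id 0xFFFFFE */
		FDLS_STATE_FABRIC_PLOGI,	/* PLOGI to the directory server, 0xFFFFFC */
		FDLS_STATE_RPN_ID,		/* register port name with the name server */
		FDLS_STATE_REGISTER_FC4_TYPES,	/* RFT_ID: register FC-4 types (FCP, CT) */
		FDLS_STATE_REGISTER_FC4_FEATURES, /* RFF_ID: register FC-4 features */
		FDLS_STATE_SCR,			/* state change registration, d_id 0xFFFFFD */
		FDLS_STATE_GPN_FT,		/* query port names of all FCP targets */
		FDLS_STATE_TGT_DISCOVERY,	/* then PLOGI/PRLI to each zoned tport */
	};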
Even though the patches have been made into a series, some patches
are heavier than others. Still, every effort has been made to keep
each patch single-purpose and to compile cleanly.
All the individual patches compile cleanly; the compiler used is GCC
13.3. Some function calls have been coded as placeholders, with
appropriate comments, to avoid compiler warnings.
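As a hypothetical illustration of that placeholder pattern (the
function name and body are invented for this example, not taken from
the series):

	/* Placeholder: the real handler arrives in a later patch in the
	 * series; until then, the stub keeps intermediate patches
	 * compiling cleanly and documents the intent.
	 */
	static void fdls_handle_rscn_stub(struct fnic_iport_s *iport)
	{
		(void) iport;	/* no-op until RSCN handling lands */
	}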
This patchset has been tested as a whole. Therefore, Tested-by tags
have been added to only one patch in the set. I've refrained from
adding Tested-by to the remaining patches, so as not to mislead the
reviewer/reader.
A brief note on the unit tests:
o. Zone-in/zone-out testing: remove a target port from the zone,
then add it back, in a loop. 1000+ iterations of this test have been
successful.
o. Configure multipathing, and run link flaps on a single link.
IOs drop briefly, but pick up as expected.
o. Configure multipathing, and run link flaps on two links, with a
30-second delay in between. IOs drop briefly, but pick up as expected.
o. Module load/unload test.
o. Repeat the above tests with 1 queue and 64 queues.
All tests were successful.
Please consider this patch series for the next merge window.
Link: https://lore.kernel.org/r/20241212020312.4786-1-kartilak@cisco.com
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
-rw-r--r--   drivers/scsi/fnic/Makefile                 |    5
-rw-r--r--   drivers/scsi/fnic/fdls_disc.c              | 4997
-rw-r--r--   drivers/scsi/fnic/fdls_fc.h                |  253
-rw-r--r--   drivers/scsi/fnic/fip.c                    | 1005
-rw-r--r--   drivers/scsi/fnic/fip.h                    |  159
-rw-r--r--   drivers/scsi/fnic/fnic.h                   |  288
-rw-r--r--   drivers/scsi/fnic/fnic_attrs.c             |   12
-rw-r--r--   drivers/scsi/fnic/fnic_debugfs.c           |   11
-rw-r--r--   drivers/scsi/fnic/fnic_fcs.c               | 1742
-rw-r--r--   drivers/scsi/fnic/fnic_fdls.h              |  434
-rw-r--r--   drivers/scsi/fnic/fnic_fip.h               |   48
-rw-r--r--   drivers/scsi/fnic/fnic_io.h                |   14
-rw-r--r--   drivers/scsi/fnic/fnic_isr.c               |   28
-rw-r--r--   drivers/scsi/fnic/fnic_main.c              |  752
-rw-r--r--   drivers/scsi/fnic/fnic_pci_subsys_devid.c  |  131
-rw-r--r--   drivers/scsi/fnic/fnic_res.c               |   77
-rw-r--r--   drivers/scsi/fnic/fnic_scsi.c              | 1161
-rw-r--r--   drivers/scsi/fnic/fnic_stats.h             |   49
-rw-r--r--   drivers/scsi/fnic/fnic_trace.c             |   81
19 files changed, 9313 insertions, 1934 deletions
diff --git a/drivers/scsi/fnic/Makefile b/drivers/scsi/fnic/Makefile
index 6214a6b2e96d..c025e875009e 100644
--- a/drivers/scsi/fnic/Makefile
+++ b/drivers/scsi/fnic/Makefile
@@ -2,11 +2,13 @@ obj-$(CONFIG_FCOE_FNIC) += fnic.o
 fnic-y	:= \
+	fip.o\
 	fnic_attrs.o \
 	fnic_isr.o \
 	fnic_main.o \
 	fnic_res.o \
 	fnic_fcs.o \
+	fdls_disc.o \
 	fnic_scsi.o \
 	fnic_trace.o \
 	fnic_debugfs.o \
@@ -15,4 +17,5 @@ fnic-y := \
 	vnic_intr.o \
 	vnic_rq.o \
 	vnic_wq_copy.o \
-	vnic_wq.o
+	vnic_wq.o \
+	fnic_pci_subsys_devid.o
diff --git a/drivers/scsi/fnic/fdls_disc.c b/drivers/scsi/fnic/fdls_disc.c
new file mode 100644
index 000000000000..11211c469583
--- /dev/null
+++ b/drivers/scsi/fnic/fdls_disc.c
@@ -0,0 +1,4997 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2008 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc. All rights reserved.
+ */
+
+#include <linux/workqueue.h>
+#include "fnic.h"
+#include "fdls_fc.h"
+#include "fnic_fdls.h"
+#include <scsi/fc/fc_fcp.h>
+#include <scsi/scsi_transport_fc.h>
+#include <linux/utsname.h>
+
+#define FC_FC4_TYPE_SCSI 0x08
+#define PORT_SPEED_BIT_8 8
+#define PORT_SPEED_BIT_9 9
+#define PORT_SPEED_BIT_14 14
+#define PORT_SPEED_BIT_15 15
+
+/* FNIC FDMI Register HBA Macros */
+#define FNIC_FDMI_NUM_PORTS 1
+#define FNIC_FDMI_NUM_HBA_ATTRS 9
+#define FNIC_FDMI_TYPE_NODE_NAME 0X1
+#define FNIC_FDMI_TYPE_MANUFACTURER 0X2
+#define FNIC_FDMI_MANUFACTURER "Cisco Systems"
+#define FNIC_FDMI_TYPE_SERIAL_NUMBER 0X3
+#define FNIC_FDMI_TYPE_MODEL 0X4
+#define FNIC_FDMI_TYPE_MODEL_DES 0X5
+#define FNIC_FDMI_MODEL_DESCRIPTION "Cisco Virtual Interface Card"
+#define FNIC_FDMI_TYPE_HARDWARE_VERSION 0X6
+#define FNIC_FDMI_TYPE_DRIVER_VERSION 0X7
+#define FNIC_FDMI_TYPE_ROM_VERSION 0X8
+#define FNIC_FDMI_TYPE_FIRMWARE_VERSION 0X9
+#define FNIC_FDMI_NN_LEN 8
+#define FNIC_FDMI_MANU_LEN 20
+#define FNIC_FDMI_SERIAL_LEN 16
+#define FNIC_FDMI_MODEL_LEN 12
+#define FNIC_FDMI_MODEL_DES_LEN 56
+#define FNIC_FDMI_HW_VER_LEN 16
+#define FNIC_FDMI_DR_VER_LEN 28
+#define FNIC_FDMI_ROM_VER_LEN 8
+#define FNIC_FDMI_FW_VER_LEN 16
+
+/* FNIC FDMI Register PA Macros */
+#define FNIC_FDMI_TYPE_FC4_TYPES 0X1
+#define FNIC_FDMI_TYPE_SUPPORTED_SPEEDS 0X2
+#define FNIC_FDMI_TYPE_CURRENT_SPEED 0X3
+#define FNIC_FDMI_TYPE_MAX_FRAME_SIZE 0X4
+#define FNIC_FDMI_TYPE_OS_NAME 0X5
+#define FNIC_FDMI_TYPE_HOST_NAME 0X6
+#define FNIC_FDMI_NUM_PORT_ATTRS 6
+#define FNIC_FDMI_FC4_LEN 32
+#define FNIC_FDMI_SUPP_SPEED_LEN 4
+#define FNIC_FDMI_CUR_SPEED_LEN 4
+#define FNIC_FDMI_MFS_LEN 4
+#define FNIC_FDMI_MFS 0x800
+#define FNIC_FDMI_OS_NAME_LEN 16
+#define FNIC_FDMI_HN_LEN 24
+
+#define FDLS_FDMI_PLOGI_PENDING 0x1
+#define FDLS_FDMI_REG_HBA_PENDING 0x2
+#define FDLS_FDMI_RPA_PENDING 0x4
+#define FDLS_FDMI_ABORT_PENDING 0x8
+#define FDLS_FDMI_MAX_RETRY 3
+
+#define RETRIES_EXHAUSTED(iport) \
+	(iport->fabric.retry_counter == FABRIC_LOGO_MAX_RETRY)
+
+#define FNIC_TPORT_MAX_NEXUS_RESTART (8)
+
+#define SCHEDULE_OXID_FREE_RETRY_TIME (300)
+
+/* Private Functions */
+static void fdls_fdmi_register_hba(struct fnic_iport_s *iport);
+static void fdls_fdmi_register_pa(struct fnic_iport_s *iport);
+static void fdls_send_rpn_id(struct fnic_iport_s *iport);
+static void fdls_process_flogi_rsp(struct fnic_iport_s *iport,
+				   struct fc_frame_header *fchdr,
+				   void *rx_frame);
+static void fnic_fdls_start_plogi(struct fnic_iport_s *iport);
+static void fnic_fdls_start_flogi(struct fnic_iport_s *iport);
+static struct fnic_tport_s *fdls_create_tport(struct fnic_iport_s *iport,
+					      uint32_t fcid,
+
uint64_t wwpn); +static void fdls_target_restart_nexus(struct fnic_tport_s *tport); +static void fdls_start_tport_timer(struct fnic_iport_s *iport, + struct fnic_tport_s *tport, int timeout); +static void fdls_tport_timer_callback(struct timer_list *t); +static void fdls_send_fdmi_plogi(struct fnic_iport_s *iport); +static void fdls_start_fabric_timer(struct fnic_iport_s *iport, + int timeout); +static void fdls_init_plogi_frame(uint8_t *frame, struct fnic_iport_s *iport); +static void fdls_init_els_acc_frame(uint8_t *frame, struct fnic_iport_s *iport); +static void fdls_init_els_rjt_frame(uint8_t *frame, struct fnic_iport_s *iport); +static void fdls_init_logo_frame(uint8_t *frame, struct fnic_iport_s *iport); +static void fdls_init_fabric_abts_frame(uint8_t *frame, + struct fnic_iport_s *iport); + +uint8_t *fdls_alloc_frame(struct fnic_iport_s *iport) +{ + struct fnic *fnic = iport->fnic; + uint8_t *frame = NULL; + + frame = mempool_alloc(fnic->frame_pool, GFP_ATOMIC); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame"); + return NULL; + } + + memset(frame, 0, FNIC_FCOE_FRAME_MAXSZ); + return frame; +} + +/** + * fdls_alloc_oxid - Allocate an oxid from the bitmap based oxid pool + * @iport: Handle to iport instance + * @oxid_frame_type: Type of frame to allocate + * @active_oxid: the oxid which is in use + * + * Called with fnic lock held + */ +uint16_t fdls_alloc_oxid(struct fnic_iport_s *iport, int oxid_frame_type, + uint16_t *active_oxid) +{ + struct fnic *fnic = iport->fnic; + struct fnic_oxid_pool_s *oxid_pool = &iport->oxid_pool; + int idx; + uint16_t oxid; + + lockdep_assert_held(&fnic->fnic_lock); + + /* + * Allocate next available oxid from bitmap + */ + idx = find_next_zero_bit(oxid_pool->bitmap, FNIC_OXID_POOL_SZ, oxid_pool->next_idx); + if (idx == FNIC_OXID_POOL_SZ) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Alloc oxid: all oxid slots are busy iport state:%d\n", + iport->state); + return FNIC_UNASSIGNED_OXID; + } + + WARN_ON(test_and_set_bit(idx, oxid_pool->bitmap)); + oxid_pool->next_idx = (idx + 1) % FNIC_OXID_POOL_SZ; /* cycle through the bitmap */ + + oxid = FNIC_OXID_ENCODE(idx, oxid_frame_type); + *active_oxid = oxid; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "alloc oxid: 0x%x, iport state: %d\n", + oxid, iport->state); + return oxid; +} + +/** + * fdls_free_oxid_idx - Free the oxid using the idx + * @iport: Handle to iport instance + * @oxid_idx: The index to free + * + * Free the oxid immediately and make it available for new requests + * Called with fnic lock held + */ +static void fdls_free_oxid_idx(struct fnic_iport_s *iport, uint16_t oxid_idx) +{ + struct fnic *fnic = iport->fnic; + struct fnic_oxid_pool_s *oxid_pool = &iport->oxid_pool; + + lockdep_assert_held(&fnic->fnic_lock); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "free oxid idx: 0x%x\n", oxid_idx); + + WARN_ON(!test_and_clear_bit(oxid_idx, oxid_pool->bitmap)); +} + +/** + * fdls_reclaim_oxid_handler - Callback handler for delayed_oxid_work + * @work: Handle to work_struct + * + * Scheduled when an oxid is to be freed later + * After freeing expired oxid(s), the handler schedules + * another callback with the remaining time + * of next unexpired entry in the reclaim list. 
+ */ +void fdls_reclaim_oxid_handler(struct work_struct *work) +{ + struct fnic_oxid_pool_s *oxid_pool = container_of(work, + struct fnic_oxid_pool_s, oxid_reclaim_work.work); + struct fnic_iport_s *iport = container_of(oxid_pool, + struct fnic_iport_s, oxid_pool); + struct fnic *fnic = iport->fnic; + struct reclaim_entry_s *reclaim_entry, *next; + unsigned long delay_j, cur_jiffies; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Reclaim oxid callback\n"); + + spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); + + /* Though the work was scheduled for one entry, + * walk through and free the expired entries which might have been scheduled + * at around the same time as the first entry + */ + list_for_each_entry_safe(reclaim_entry, next, + &(oxid_pool->oxid_reclaim_list), links) { + + /* The list is always maintained in the order of expiry time */ + cur_jiffies = jiffies; + if (time_before(cur_jiffies, reclaim_entry->expires)) + break; + + list_del(&reclaim_entry->links); + fdls_free_oxid_idx(iport, reclaim_entry->oxid_idx); + kfree(reclaim_entry); + } + + /* schedule to free up the next entry */ + if (!list_empty(&oxid_pool->oxid_reclaim_list)) { + reclaim_entry = list_first_entry(&oxid_pool->oxid_reclaim_list, + struct reclaim_entry_s, links); + + delay_j = reclaim_entry->expires - cur_jiffies; + schedule_delayed_work(&oxid_pool->oxid_reclaim_work, delay_j); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Scheduling next callback at:%ld jiffies\n", delay_j); + } + + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); +} + +/** + * fdls_free_oxid - Helper function to free the oxid + * @iport: Handle to iport instance + * @oxid: oxid to free + * @active_oxid: the oxid which is in use + * + * Called with fnic lock held + */ +void fdls_free_oxid(struct fnic_iport_s *iport, + uint16_t oxid, uint16_t *active_oxid) +{ + fdls_free_oxid_idx(iport, FNIC_OXID_IDX(oxid)); + *active_oxid = FNIC_UNASSIGNED_OXID; +} + +/** + * fdls_schedule_oxid_free - Schedule oxid to be freed later + * @iport: Handle to iport instance + * @active_oxid: the oxid which is in use + * + * Gets called in a rare case scenario when both a command + * (fdls or target discovery) timed out and the following ABTS + * timed out as well, without a link change. + * + * Called with fnic lock held + */ +void fdls_schedule_oxid_free(struct fnic_iport_s *iport, uint16_t *active_oxid) +{ + struct fnic *fnic = iport->fnic; + struct fnic_oxid_pool_s *oxid_pool = &iport->oxid_pool; + struct reclaim_entry_s *reclaim_entry; + unsigned long delay_j = msecs_to_jiffies(OXID_RECLAIM_TOV(iport)); + int oxid_idx = FNIC_OXID_IDX(*active_oxid); + + lockdep_assert_held(&fnic->fnic_lock); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Schedule oxid free. 
oxid: 0x%x\n", *active_oxid); + + *active_oxid = FNIC_UNASSIGNED_OXID; + + reclaim_entry = (struct reclaim_entry_s *) + kzalloc(sizeof(struct reclaim_entry_s), GFP_ATOMIC); + + if (!reclaim_entry) { + FNIC_FCS_DBG(KERN_WARNING, fnic->host, fnic->fnic_num, + "Failed to allocate memory for reclaim struct for oxid idx: %d\n", + oxid_idx); + + /* Retry the scheduling */ + WARN_ON(test_and_set_bit(oxid_idx, oxid_pool->pending_schedule_free)); + schedule_delayed_work(&oxid_pool->schedule_oxid_free_retry, 0); + return; + } + + reclaim_entry->oxid_idx = oxid_idx; + reclaim_entry->expires = round_jiffies(jiffies + delay_j); + + list_add_tail(&reclaim_entry->links, &oxid_pool->oxid_reclaim_list); + + schedule_delayed_work(&oxid_pool->oxid_reclaim_work, delay_j); +} + +/** + * fdls_schedule_oxid_free_retry_work - Thread to schedule the + * oxid to be freed later + * + * @work: Handle to the work struct + */ +void fdls_schedule_oxid_free_retry_work(struct work_struct *work) +{ + struct fnic_oxid_pool_s *oxid_pool = container_of(work, + struct fnic_oxid_pool_s, schedule_oxid_free_retry.work); + struct fnic_iport_s *iport = container_of(oxid_pool, + struct fnic_iport_s, oxid_pool); + struct fnic *fnic = iport->fnic; + struct reclaim_entry_s *reclaim_entry; + unsigned long delay_j = msecs_to_jiffies(OXID_RECLAIM_TOV(iport)); + int idx; + + spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); + + for_each_set_bit(idx, oxid_pool->pending_schedule_free, FNIC_OXID_POOL_SZ) { + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Schedule oxid free. oxid idx: %d\n", idx); + + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + reclaim_entry = (struct reclaim_entry_s *) + kzalloc(sizeof(struct reclaim_entry_s), GFP_KERNEL); + spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); + + if (!reclaim_entry) { + FNIC_FCS_DBG(KERN_WARNING, fnic->host, fnic->fnic_num, + "Failed to allocate memory for reclaim struct for oxid idx: 0x%x\n", + idx); + + schedule_delayed_work(&oxid_pool->schedule_oxid_free_retry, + msecs_to_jiffies(SCHEDULE_OXID_FREE_RETRY_TIME)); + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + return; + } + + if (test_and_clear_bit(idx, oxid_pool->pending_schedule_free)) { + reclaim_entry->oxid_idx = idx; + reclaim_entry->expires = round_jiffies(jiffies + delay_j); + list_add_tail(&reclaim_entry->links, &oxid_pool->oxid_reclaim_list); + schedule_delayed_work(&oxid_pool->oxid_reclaim_work, delay_j); + } else { + /* unlikely scenario, free the allocated memory and continue */ + kfree(reclaim_entry); + } +} + + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); +} + +static bool fdls_is_oxid_fabric_req(uint16_t oxid) +{ + int oxid_frame_type = FNIC_FRAME_TYPE(oxid); + + switch (oxid_frame_type) { + case FNIC_FRAME_TYPE_FABRIC_FLOGI: + case FNIC_FRAME_TYPE_FABRIC_PLOGI: + case FNIC_FRAME_TYPE_FABRIC_RPN: + case FNIC_FRAME_TYPE_FABRIC_RFT: + case FNIC_FRAME_TYPE_FABRIC_RFF: + case FNIC_FRAME_TYPE_FABRIC_GPN_FT: + case FNIC_FRAME_TYPE_FABRIC_LOGO: + break; + default: + return false; + } + return true; +} + +static bool fdls_is_oxid_fdmi_req(uint16_t oxid) +{ + int oxid_frame_type = FNIC_FRAME_TYPE(oxid); + + switch (oxid_frame_type) { + case FNIC_FRAME_TYPE_FDMI_PLOGI: + case FNIC_FRAME_TYPE_FDMI_RHBA: + case FNIC_FRAME_TYPE_FDMI_RPA: + break; + default: + return false; + } + return true; +} + +static bool fdls_is_oxid_tgt_req(uint16_t oxid) +{ + int oxid_frame_type = FNIC_FRAME_TYPE(oxid); + + switch (oxid_frame_type) { + case FNIC_FRAME_TYPE_TGT_PLOGI: + case 
FNIC_FRAME_TYPE_TGT_PRLI: + case FNIC_FRAME_TYPE_TGT_ADISC: + case FNIC_FRAME_TYPE_TGT_LOGO: + break; + default: + return false; + } + return true; +} + +static void fdls_reset_oxid_pool(struct fnic_iport_s *iport) +{ + struct fnic_oxid_pool_s *oxid_pool = &iport->oxid_pool; + + oxid_pool->next_idx = 0; +} + +void fnic_del_fabric_timer_sync(struct fnic *fnic) +{ + fnic->iport.fabric.del_timer_inprogress = 1; + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + del_timer_sync(&fnic->iport.fabric.retry_timer); + spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); + fnic->iport.fabric.del_timer_inprogress = 0; +} + +void fnic_del_tport_timer_sync(struct fnic *fnic, + struct fnic_tport_s *tport) +{ + tport->del_timer_inprogress = 1; + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + del_timer_sync(&tport->retry_timer); + spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); + tport->del_timer_inprogress = 0; +} + +static void +fdls_start_fabric_timer(struct fnic_iport_s *iport, int timeout) +{ + u64 fabric_tov; + struct fnic *fnic = iport->fnic; + + if (iport->fabric.timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "iport fcid: 0x%x: Canceling fabric disc timer\n", + iport->fcid); + fnic_del_fabric_timer_sync(fnic); + iport->fabric.timer_pending = 0; + } + + if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED)) + iport->fabric.retry_counter++; + + fabric_tov = jiffies + msecs_to_jiffies(timeout); + mod_timer(&iport->fabric.retry_timer, round_jiffies(fabric_tov)); + iport->fabric.timer_pending = 1; + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fabric timer is %d ", timeout); +} + +static void +fdls_start_tport_timer(struct fnic_iport_s *iport, + struct fnic_tport_s *tport, int timeout) +{ + u64 fabric_tov; + struct fnic *fnic = iport->fnic; + + if (tport->timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "tport fcid 0x%x: Canceling disc timer\n", + tport->fcid); + fnic_del_tport_timer_sync(fnic, tport); + tport->timer_pending = 0; + } + + if (!(tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED)) + tport->retry_counter++; + + fabric_tov = jiffies + msecs_to_jiffies(timeout); + mod_timer(&tport->retry_timer, round_jiffies(fabric_tov)); + tport->timer_pending = 1; +} + +void fdls_init_plogi_frame(uint8_t *frame, + struct fnic_iport_s *iport) +{ + struct fc_std_flogi *pplogi; + uint8_t s_id[3]; + + pplogi = (struct fc_std_flogi *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + *pplogi = (struct fc_std_flogi) { + .fchdr = {.fh_r_ctl = FC_RCTL_ELS_REQ, .fh_d_id = {0xFF, 0xFF, 0xFC}, + .fh_type = FC_TYPE_ELS, .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0}, + .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)}, + .els = { + .fl_cmd = ELS_PLOGI, + .fl_csp = {.sp_hi_ver = FNIC_FC_PH_VER_HI, + .sp_lo_ver = FNIC_FC_PH_VER_LO, + .sp_bb_cred = cpu_to_be16(FNIC_FC_B2B_CREDIT), + .sp_features = cpu_to_be16(FC_SP_FT_CIRO), + .sp_bb_data = cpu_to_be16(FNIC_FC_B2B_RDF_SZ), + .sp_tot_seq = cpu_to_be16(FNIC_FC_CONCUR_SEQS), + .sp_rel_off = cpu_to_be16(FNIC_FC_RO_INFO), + .sp_e_d_tov = cpu_to_be32(FC_DEF_E_D_TOV)}, + .fl_cssp[2].cp_class = cpu_to_be16(FC_CPC_VALID | FC_CPC_SEQ), + .fl_cssp[2].cp_rdfs = cpu_to_be16(0x800), + .fl_cssp[2].cp_con_seq = cpu_to_be16(0xFF), + .fl_cssp[2].cp_open_seq = 1} + }; + + FNIC_STD_SET_NPORT_NAME(&pplogi->els.fl_wwpn, iport->wwpn); + FNIC_STD_SET_NODE_NAME(&pplogi->els.fl_wwnn, iport->wwnn); + FNIC_LOGI_SET_RDF_SIZE(pplogi->els, iport->max_payload_size); + + hton24(s_id, iport->fcid); + FNIC_STD_SET_S_ID(pplogi->fchdr, 
s_id); +} + +static void fdls_init_els_acc_frame(uint8_t *frame, + struct fnic_iport_s *iport) +{ + struct fc_std_els_acc_rsp *pels_acc; + uint8_t s_id[3]; + + pels_acc = (struct fc_std_els_acc_rsp *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + *pels_acc = (struct fc_std_els_acc_rsp) { + .fchdr = {.fh_r_ctl = FC_RCTL_ELS_REP, + .fh_type = FC_TYPE_ELS, .fh_f_ctl = {FNIC_ELS_REP_FCTL, 0, 0}}, + .acc.la_cmd = ELS_LS_ACC, + }; + + hton24(s_id, iport->fcid); + FNIC_STD_SET_S_ID(pels_acc->fchdr, s_id); + FNIC_STD_SET_RX_ID(pels_acc->fchdr, FNIC_UNASSIGNED_RXID); +} + +static void fdls_init_els_rjt_frame(uint8_t *frame, + struct fnic_iport_s *iport) +{ + struct fc_std_els_rjt_rsp *pels_rjt; + + pels_rjt = (struct fc_std_els_rjt_rsp *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + *pels_rjt = (struct fc_std_els_rjt_rsp) { + .fchdr = {.fh_r_ctl = FC_RCTL_ELS_REP, .fh_type = FC_TYPE_ELS, + .fh_f_ctl = {FNIC_ELS_REP_FCTL, 0, 0}}, + .rej.er_cmd = ELS_LS_RJT, + }; + + FNIC_STD_SET_RX_ID(pels_rjt->fchdr, FNIC_UNASSIGNED_RXID); +} + +static void fdls_init_logo_frame(uint8_t *frame, + struct fnic_iport_s *iport) +{ + struct fc_std_logo *plogo; + uint8_t s_id[3]; + + plogo = (struct fc_std_logo *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + *plogo = (struct fc_std_logo) { + .fchdr = {.fh_r_ctl = FC_RCTL_ELS_REQ, .fh_type = FC_TYPE_ELS, + .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0}}, + .els.fl_cmd = ELS_LOGO, + }; + + hton24(s_id, iport->fcid); + FNIC_STD_SET_S_ID(plogo->fchdr, s_id); + memcpy(plogo->els.fl_n_port_id, s_id, 3); + + FNIC_STD_SET_NPORT_NAME(&plogo->els.fl_n_port_wwn, + iport->wwpn); +} + +static void fdls_init_fabric_abts_frame(uint8_t *frame, + struct fnic_iport_s *iport) +{ + struct fc_frame_header *pfabric_abts; + + pfabric_abts = (struct fc_frame_header *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + *pfabric_abts = (struct fc_frame_header) { + .fh_r_ctl = FC_RCTL_BA_ABTS, /* ABTS */ + .fh_s_id = {0x00, 0x00, 0x00}, + .fh_cs_ctl = 0x00, .fh_type = FC_TYPE_BLS, + .fh_f_ctl = {FNIC_REQ_ABTS_FCTL, 0, 0}, .fh_seq_id = 0x00, + .fh_df_ctl = 0x00, .fh_seq_cnt = 0x0000, + .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID), + .fh_parm_offset = 0x00000000, /* bit:0 = 0 Abort a exchange */ + }; +} + +static void +fdls_send_rscn_resp(struct fnic_iport_s *iport, + struct fc_frame_header *rscn_fchdr) +{ + uint8_t *frame; + struct fc_std_els_acc_rsp *pels_acc; + struct fnic *fnic = iport->fnic; + uint16_t oxid; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_els_acc_rsp); + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send RSCN response"); + return; + } + + pels_acc = (struct fc_std_els_acc_rsp *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + fdls_init_els_acc_frame(frame, iport); + + FNIC_STD_SET_D_ID(pels_acc->fchdr, rscn_fchdr->fh_s_id); + + oxid = FNIC_STD_GET_OX_ID(rscn_fchdr); + FNIC_STD_SET_OX_ID(pels_acc->fchdr, oxid); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send RSCN response with oxid: 0x%x", + iport->fcid, oxid); + + fnic_send_fcoe_frame(iport, frame, frame_size); +} + +static void +fdls_send_logo_resp(struct fnic_iport_s *iport, + struct fc_frame_header *req_fchdr) +{ + uint8_t *frame; + struct fc_std_els_acc_rsp *plogo_resp; + struct fnic *fnic = iport->fnic; + uint16_t oxid; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_els_acc_rsp); + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + 
"Failed to allocate frame to send LOGO response"); + return; + } + + plogo_resp = (struct fc_std_els_acc_rsp *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + fdls_init_els_acc_frame(frame, iport); + + FNIC_STD_SET_D_ID(plogo_resp->fchdr, req_fchdr->fh_s_id); + + oxid = FNIC_STD_GET_OX_ID(req_fchdr); + FNIC_STD_SET_OX_ID(plogo_resp->fchdr, oxid); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send LOGO response with oxid: 0x%x", + iport->fcid, oxid); + + fnic_send_fcoe_frame(iport, frame, frame_size); +} + +void +fdls_send_tport_abts(struct fnic_iport_s *iport, + struct fnic_tport_s *tport) +{ + uint8_t *frame; + uint8_t s_id[3]; + uint8_t d_id[3]; + struct fnic *fnic = iport->fnic; + struct fc_frame_header *ptport_abts; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_frame_header); + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send tport ABTS"); + return; + } + + ptport_abts = (struct fc_frame_header *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + *ptport_abts = (struct fc_frame_header) { + .fh_r_ctl = FC_RCTL_BA_ABTS, /* ABTS */ + .fh_cs_ctl = 0x00, .fh_type = FC_TYPE_BLS, + .fh_f_ctl = {FNIC_REQ_ABTS_FCTL, 0, 0}, .fh_seq_id = 0x00, + .fh_df_ctl = 0x00, .fh_seq_cnt = 0x0000, + .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID), + .fh_parm_offset = 0x00000000, /* bit:0 = 0 Abort a exchange */ + }; + + hton24(s_id, iport->fcid); + hton24(d_id, tport->fcid); + FNIC_STD_SET_S_ID(*ptport_abts, s_id); + FNIC_STD_SET_D_ID(*ptport_abts, d_id); + tport->flags |= FNIC_FDLS_TGT_ABORT_ISSUED; + + FNIC_STD_SET_OX_ID(*ptport_abts, tport->active_oxid); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send tport abts: tport->state: %d ", + iport->fcid, tport->state); + + fnic_send_fcoe_frame(iport, frame, frame_size); + + /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */ + fdls_start_tport_timer(iport, tport, 2 * iport->e_d_tov); +} +static void fdls_send_fabric_abts(struct fnic_iport_s *iport) +{ + uint8_t *frame; + uint8_t s_id[3]; + uint8_t d_id[3]; + struct fnic *fnic = iport->fnic; + struct fc_frame_header *pfabric_abts; + uint16_t oxid; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_frame_header); + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send fabric ABTS"); + return; + } + + pfabric_abts = (struct fc_frame_header *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + fdls_init_fabric_abts_frame(frame, iport); + + hton24(s_id, iport->fcid); + + switch (iport->fabric.state) { + case FDLS_STATE_FABRIC_LOGO: + hton24(d_id, FC_FID_FLOGI); + FNIC_STD_SET_D_ID(*pfabric_abts, d_id); + break; + + case FDLS_STATE_FABRIC_FLOGI: + hton24(d_id, FC_FID_FLOGI); + FNIC_STD_SET_D_ID(*pfabric_abts, d_id); + break; + + case FDLS_STATE_FABRIC_PLOGI: + FNIC_STD_SET_S_ID(*pfabric_abts, s_id); + hton24(d_id, FC_FID_DIR_SERV); + FNIC_STD_SET_D_ID(*pfabric_abts, d_id); + break; + + case FDLS_STATE_RPN_ID: + FNIC_STD_SET_S_ID(*pfabric_abts, s_id); + hton24(d_id, FC_FID_DIR_SERV); + FNIC_STD_SET_D_ID(*pfabric_abts, d_id); + break; + + case FDLS_STATE_SCR: + FNIC_STD_SET_S_ID(*pfabric_abts, s_id); + hton24(d_id, FC_FID_FCTRL); + FNIC_STD_SET_D_ID(*pfabric_abts, d_id); + break; + + case FDLS_STATE_REGISTER_FC4_TYPES: + FNIC_STD_SET_S_ID(*pfabric_abts, s_id); + hton24(d_id, FC_FID_DIR_SERV); + FNIC_STD_SET_D_ID(*pfabric_abts, d_id); + break; + + 
case FDLS_STATE_REGISTER_FC4_FEATURES: + FNIC_STD_SET_S_ID(*pfabric_abts, s_id); + hton24(d_id, FC_FID_DIR_SERV); + FNIC_STD_SET_D_ID(*pfabric_abts, d_id); + break; + + case FDLS_STATE_GPN_FT: + FNIC_STD_SET_S_ID(*pfabric_abts, s_id); + hton24(d_id, FC_FID_DIR_SERV); + FNIC_STD_SET_D_ID(*pfabric_abts, d_id); + break; + default: + return; + } + + oxid = iport->active_oxid_fabric_req; + FNIC_STD_SET_OX_ID(*pfabric_abts, oxid); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send fabric abts. iport->fabric.state: %d oxid: 0x%x", + iport->fcid, iport->fabric.state, oxid); + + iport->fabric.flags |= FNIC_FDLS_FABRIC_ABORT_ISSUED; + + fnic_send_fcoe_frame(iport, frame, frame_size); + + /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */ + fdls_start_fabric_timer(iport, 2 * iport->e_d_tov); + iport->fabric.timer_pending = 1; +} + +static void fdls_send_fdmi_abts(struct fnic_iport_s *iport) +{ + uint8_t *frame; + uint8_t d_id[3]; + struct fnic *fnic = iport->fnic; + struct fc_frame_header *pfabric_abts; + unsigned long fdmi_tov; + uint16_t oxid; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_frame_header); + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send FDMI ABTS"); + return; + } + + pfabric_abts = (struct fc_frame_header *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + fdls_init_fabric_abts_frame(frame, iport); + + hton24(d_id, FC_FID_MGMT_SERV); + FNIC_STD_SET_D_ID(*pfabric_abts, d_id); + + if (iport->fabric.fdmi_pending & FDLS_FDMI_PLOGI_PENDING) { + oxid = iport->active_oxid_fdmi_plogi; + FNIC_STD_SET_OX_ID(*pfabric_abts, oxid); + fnic_send_fcoe_frame(iport, frame, frame_size); + } else { + if (iport->fabric.fdmi_pending & FDLS_FDMI_REG_HBA_PENDING) { + oxid = iport->active_oxid_fdmi_rhba; + FNIC_STD_SET_OX_ID(*pfabric_abts, oxid); + fnic_send_fcoe_frame(iport, frame, frame_size); + } + if (iport->fabric.fdmi_pending & FDLS_FDMI_RPA_PENDING) { + oxid = iport->active_oxid_fdmi_rpa; + FNIC_STD_SET_OX_ID(*pfabric_abts, oxid); + fnic_send_fcoe_frame(iport, frame, frame_size); + } + } + + fdmi_tov = jiffies + msecs_to_jiffies(2 * iport->e_d_tov); + mod_timer(&iport->fabric.fdmi_timer, round_jiffies(fdmi_tov)); + iport->fabric.fdmi_pending |= FDLS_FDMI_ABORT_PENDING; +} + +static void fdls_send_fabric_flogi(struct fnic_iport_s *iport) +{ + uint8_t *frame; + struct fc_std_flogi *pflogi; + struct fnic *fnic = iport->fnic; + uint16_t oxid; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_flogi); + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send FLOGI"); + iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME; + goto err_out; + } + + pflogi = (struct fc_std_flogi *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + *pflogi = (struct fc_std_flogi) { + .fchdr = {.fh_r_ctl = FC_RCTL_ELS_REQ, .fh_d_id = {0xFF, 0xFF, 0xFE}, + .fh_type = FC_TYPE_ELS, .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0}, + .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)}, + .els.fl_cmd = ELS_FLOGI, + .els.fl_csp = {.sp_hi_ver = FNIC_FC_PH_VER_HI, + .sp_lo_ver = FNIC_FC_PH_VER_LO, + .sp_bb_cred = cpu_to_be16(FNIC_FC_B2B_CREDIT), + .sp_bb_data = cpu_to_be16(FNIC_FC_B2B_RDF_SZ)}, + .els.fl_cssp[2].cp_class = cpu_to_be16(FC_CPC_VALID | FC_CPC_SEQ) + }; + + FNIC_STD_SET_NPORT_NAME(&pflogi->els.fl_wwpn, iport->wwpn); + FNIC_STD_SET_NODE_NAME(&pflogi->els.fl_wwnn, iport->wwnn); + 
FNIC_LOGI_SET_RDF_SIZE(pflogi->els, iport->max_payload_size); + FNIC_LOGI_SET_R_A_TOV(pflogi->els, iport->r_a_tov); + FNIC_LOGI_SET_E_D_TOV(pflogi->els, iport->e_d_tov); + + oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_FLOGI, + &iport->active_oxid_fabric_req); + + if (oxid == FNIC_UNASSIGNED_OXID) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Failed to allocate OXID to send FLOGI", + iport->fcid); + mempool_free(frame, fnic->frame_pool); + iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME; + goto err_out; + } + FNIC_STD_SET_OX_ID(pflogi->fchdr, oxid); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send fabric FLOGI with oxid: 0x%x", iport->fcid, + oxid); + + fnic_send_fcoe_frame(iport, frame, frame_size); + atomic64_inc(&iport->iport_stats.fabric_flogi_sent); +err_out: + /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */ + fdls_start_fabric_timer(iport, 2 * iport->e_d_tov); +} + +static void fdls_send_fabric_plogi(struct fnic_iport_s *iport) +{ + uint8_t *frame; + struct fc_std_flogi *pplogi; + struct fnic *fnic = iport->fnic; + uint16_t oxid; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_flogi); + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send PLOGI"); + iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME; + goto err_out; + } + + pplogi = (struct fc_std_flogi *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + fdls_init_plogi_frame(frame, iport); + + oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_PLOGI, + &iport->active_oxid_fabric_req); + if (oxid == FNIC_UNASSIGNED_OXID) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Failed to allocate OXID to send fabric PLOGI", + iport->fcid); + mempool_free(frame, fnic->frame_pool); + iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME; + goto err_out; + } + FNIC_STD_SET_OX_ID(pplogi->fchdr, oxid); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send fabric PLOGI with oxid: 0x%x", iport->fcid, + oxid); + + fnic_send_fcoe_frame(iport, frame, frame_size); + atomic64_inc(&iport->iport_stats.fabric_plogi_sent); + +err_out: + /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */ + fdls_start_fabric_timer(iport, 2 * iport->e_d_tov); +} + +static void fdls_send_fdmi_plogi(struct fnic_iport_s *iport) +{ + uint8_t *frame; + struct fc_std_flogi *pplogi; + struct fnic *fnic = iport->fnic; + uint16_t oxid; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_flogi); + uint8_t d_id[3]; + u64 fdmi_tov; + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send FDMI PLOGI"); + goto err_out; + } + + pplogi = (struct fc_std_flogi *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + fdls_init_plogi_frame(frame, iport); + + oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FDMI_PLOGI, + &iport->active_oxid_fdmi_plogi); + + if (oxid == FNIC_UNASSIGNED_OXID) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Failed to allocate OXID to send FDMI PLOGI", + iport->fcid); + mempool_free(frame, fnic->frame_pool); + goto err_out; + } + FNIC_STD_SET_OX_ID(pplogi->fchdr, oxid); + + hton24(d_id, FC_FID_MGMT_SERV); + FNIC_STD_SET_D_ID(pplogi->fchdr, d_id); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send FDMI PLOGI with oxid: 0x%x", + iport->fcid, oxid); + + fnic_send_fcoe_frame(iport, frame, frame_size); + 
+err_out: + fdmi_tov = jiffies + msecs_to_jiffies(2 * iport->e_d_tov); + mod_timer(&iport->fabric.fdmi_timer, round_jiffies(fdmi_tov)); + iport->fabric.fdmi_pending = FDLS_FDMI_PLOGI_PENDING; +} + +static void fdls_send_rpn_id(struct fnic_iport_s *iport) +{ + uint8_t *frame; + struct fc_std_rpn_id *prpn_id; + struct fnic *fnic = iport->fnic; + uint16_t oxid; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_rpn_id); + uint8_t fcid[3]; + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send RPN_ID"); + iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME; + goto err_out; + } + + prpn_id = (struct fc_std_rpn_id *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + *prpn_id = (struct fc_std_rpn_id) { + .fchdr = {.fh_r_ctl = FC_RCTL_DD_UNSOL_CTL, + .fh_d_id = {0xFF, 0xFF, 0xFC}, .fh_type = FC_TYPE_CT, + .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0}, + .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)}, + .fc_std_ct_hdr = {.ct_rev = FC_CT_REV, .ct_fs_type = FC_FST_DIR, + .ct_fs_subtype = FC_NS_SUBTYPE, + .ct_cmd = cpu_to_be16(FC_NS_RPN_ID)} + }; + + hton24(fcid, iport->fcid); + FNIC_STD_SET_S_ID(prpn_id->fchdr, fcid); + + FNIC_STD_SET_PORT_ID(prpn_id->rpn_id, fcid); + FNIC_STD_SET_PORT_NAME(prpn_id->rpn_id, iport->wwpn); + + oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_RPN, + &iport->active_oxid_fabric_req); + + if (oxid == FNIC_UNASSIGNED_OXID) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Failed to allocate OXID to send RPN_ID", + iport->fcid); + mempool_free(frame, fnic->frame_pool); + iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME; + goto err_out; + } + FNIC_STD_SET_OX_ID(prpn_id->fchdr, oxid); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send RPN ID with oxid: 0x%x", iport->fcid, + oxid); + + fnic_send_fcoe_frame(iport, frame, frame_size); + +err_out: + /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */ + fdls_start_fabric_timer(iport, 2 * iport->e_d_tov); +} + +static void fdls_send_scr(struct fnic_iport_s *iport) +{ + uint8_t *frame; + struct fc_std_scr *pscr; + struct fnic *fnic = iport->fnic; + uint16_t oxid; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_scr); + uint8_t fcid[3]; + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send SCR"); + iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME; + goto err_out; + } + + pscr = (struct fc_std_scr *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + *pscr = (struct fc_std_scr) { + .fchdr = {.fh_r_ctl = FC_RCTL_ELS_REQ, + .fh_d_id = {0xFF, 0xFF, 0xFD}, .fh_type = FC_TYPE_ELS, + .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0}, + .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)}, + .scr = {.scr_cmd = ELS_SCR, + .scr_reg_func = ELS_SCRF_FULL} + }; + + hton24(fcid, iport->fcid); + FNIC_STD_SET_S_ID(pscr->fchdr, fcid); + + oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_SCR, + &iport->active_oxid_fabric_req); + if (oxid == FNIC_UNASSIGNED_OXID) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Failed to allocate OXID to send SCR", + iport->fcid); + mempool_free(frame, fnic->frame_pool); + iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME; + goto err_out; + } + FNIC_STD_SET_OX_ID(pscr->fchdr, oxid); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send SCR with oxid: 0x%x", iport->fcid, + oxid); + + fnic_send_fcoe_frame(iport, frame, frame_size); + 
atomic64_inc(&iport->iport_stats.fabric_scr_sent); + +err_out: + /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */ + fdls_start_fabric_timer(iport, 2 * iport->e_d_tov); +} + +static void fdls_send_gpn_ft(struct fnic_iport_s *iport, int fdls_state) +{ + uint8_t *frame; + struct fc_std_gpn_ft *pgpn_ft; + struct fnic *fnic = iport->fnic; + uint16_t oxid; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_gpn_ft); + uint8_t fcid[3]; + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send GPN FT"); + iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME; + goto err_out; + } + + pgpn_ft = (struct fc_std_gpn_ft *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + *pgpn_ft = (struct fc_std_gpn_ft) { + .fchdr = {.fh_r_ctl = FC_RCTL_DD_UNSOL_CTL, + .fh_d_id = {0xFF, 0xFF, 0xFC}, .fh_type = FC_TYPE_CT, + .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0}, + .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)}, + .fc_std_ct_hdr = {.ct_rev = FC_CT_REV, .ct_fs_type = FC_FST_DIR, + .ct_fs_subtype = FC_NS_SUBTYPE, + .ct_cmd = cpu_to_be16(FC_NS_GPN_FT)}, + .gpn_ft.fn_fc4_type = 0x08 + }; + + hton24(fcid, iport->fcid); + FNIC_STD_SET_S_ID(pgpn_ft->fchdr, fcid); + + oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_GPN_FT, + &iport->active_oxid_fabric_req); + + if (oxid == FNIC_UNASSIGNED_OXID) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Failed to allocate OXID to send GPN FT", + iport->fcid); + mempool_free(frame, fnic->frame_pool); + iport->fabric.flags |= FNIC_FDLS_RETRY_FRAME; + goto err_out; + } + FNIC_STD_SET_OX_ID(pgpn_ft->fchdr, oxid); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send GPN FT with oxid: 0x%x", iport->fcid, + oxid); + + fnic_send_fcoe_frame(iport, frame, frame_size); + +err_out: + /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */ + fdls_start_fabric_timer(iport, 2 * iport->e_d_tov); + fdls_set_state((&iport->fabric), fdls_state); +} + +static void +fdls_send_tgt_adisc(struct fnic_iport_s *iport, struct fnic_tport_s *tport) +{ + uint8_t *frame; + struct fc_std_els_adisc *padisc; + uint8_t s_id[3]; + uint8_t d_id[3]; + uint16_t oxid; + struct fnic *fnic = iport->fnic; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_els_adisc); + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send TGT ADISC"); + tport->flags |= FNIC_FDLS_RETRY_FRAME; + goto err_out; + } + + padisc = (struct fc_std_els_adisc *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + + hton24(s_id, iport->fcid); + hton24(d_id, tport->fcid); + memcpy(padisc->els.adisc_port_id, s_id, 3); + FNIC_STD_SET_S_ID(padisc->fchdr, s_id); + FNIC_STD_SET_D_ID(padisc->fchdr, d_id); + + FNIC_STD_SET_F_CTL(padisc->fchdr, FNIC_ELS_REQ_FCTL << 16); + FNIC_STD_SET_R_CTL(padisc->fchdr, FC_RCTL_ELS_REQ); + FNIC_STD_SET_TYPE(padisc->fchdr, FC_TYPE_ELS); + + oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_TGT_ADISC, &tport->active_oxid); + if (oxid == FNIC_UNASSIGNED_OXID) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Failed to allocate OXID to send TGT ADISC", + iport->fcid); + mempool_free(frame, fnic->frame_pool); + tport->flags |= FNIC_FDLS_RETRY_FRAME; + goto err_out; + } + FNIC_STD_SET_OX_ID(padisc->fchdr, oxid); + FNIC_STD_SET_RX_ID(padisc->fchdr, FNIC_UNASSIGNED_RXID); + + tport->flags &= ~FNIC_FDLS_TGT_ABORT_ISSUED; + + 
FNIC_STD_SET_NPORT_NAME(&padisc->els.adisc_wwpn, + iport->wwpn); + FNIC_STD_SET_NODE_NAME(&padisc->els.adisc_wwnn, + iport->wwnn); + + padisc->els.adisc_cmd = ELS_ADISC; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send ADISC to tgt fcid: 0x%x", + iport->fcid, tport->fcid); + + atomic64_inc(&iport->iport_stats.tport_adisc_sent); + + fnic_send_fcoe_frame(iport, frame, frame_size); + +err_out: + /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */ + fdls_start_tport_timer(iport, tport, 2 * iport->e_d_tov); +} + +bool fdls_delete_tport(struct fnic_iport_s *iport, struct fnic_tport_s *tport) +{ + struct fnic_tport_event_s *tport_del_evt; + struct fnic *fnic = iport->fnic; + + if ((tport->state == FDLS_TGT_STATE_OFFLINING) + || (tport->state == FDLS_TGT_STATE_OFFLINE)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "tport fcid 0x%x: tport state is offlining/offline\n", + tport->fcid); + return false; + } + + fdls_set_tport_state(tport, FDLS_TGT_STATE_OFFLINING); + /* + * By setting this flag, the tport will not be seen in a look-up + * in an RSCN. Even if we move to multithreaded model, this tport + * will be destroyed and a new RSCN will have to create a new one + */ + tport->flags |= FNIC_FDLS_TPORT_TERMINATING; + + if (tport->timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "tport fcid 0x%x: Canceling disc timer\n", + tport->fcid); + fnic_del_tport_timer_sync(fnic, tport); + tport->timer_pending = 0; + } + + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + fnic_rport_exch_reset(iport->fnic, tport->fcid); + spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); + + if (tport->flags & FNIC_FDLS_SCSI_REGISTERED) { + tport_del_evt = + kzalloc(sizeof(struct fnic_tport_event_s), GFP_ATOMIC); + if (!tport_del_evt) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Failed to allocate memory for tport fcid: 0x%0x\n", + tport->fcid); + return false; + } + tport_del_evt->event = TGT_EV_RPORT_DEL; + tport_del_evt->arg1 = (void *) tport; + list_add_tail(&tport_del_evt->links, &fnic->tport_event_list); + queue_work(fnic_event_queue, &fnic->tport_work); + } else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "tport 0x%x not reg with scsi_transport. 
Freeing locally", + tport->fcid); + list_del(&tport->links); + kfree(tport); + } + return true; +} + +static void +fdls_send_tgt_plogi(struct fnic_iport_s *iport, struct fnic_tport_s *tport) +{ + uint8_t *frame; + struct fc_std_flogi *pplogi; + struct fnic *fnic = iport->fnic; + uint16_t oxid; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_flogi); + uint8_t d_id[3]; + uint32_t timeout; + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send TGT PLOGI"); + tport->flags |= FNIC_FDLS_RETRY_FRAME; + goto err_out; + } + + pplogi = (struct fc_std_flogi *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + fdls_init_plogi_frame(frame, iport); + + oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_TGT_PLOGI, &tport->active_oxid); + if (oxid == FNIC_UNASSIGNED_OXID) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Failed to allocate oxid to send PLOGI to fcid: 0x%x", + iport->fcid, tport->fcid); + mempool_free(frame, fnic->frame_pool); + tport->flags |= FNIC_FDLS_RETRY_FRAME; + goto err_out; + } + FNIC_STD_SET_OX_ID(pplogi->fchdr, oxid); + + tport->flags &= ~FNIC_FDLS_TGT_ABORT_ISSUED; + + hton24(d_id, tport->fcid); + FNIC_STD_SET_D_ID(pplogi->fchdr, d_id); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send tgt PLOGI to tgt: 0x%x with oxid: 0x%x", + iport->fcid, tport->fcid, oxid); + + fnic_send_fcoe_frame(iport, frame, frame_size); + atomic64_inc(&iport->iport_stats.tport_plogi_sent); + +err_out: + timeout = max(2 * iport->e_d_tov, iport->plogi_timeout); + /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */ + fdls_start_tport_timer(iport, tport, timeout); +} + +static uint16_t +fnic_fc_plogi_rsp_rdf(struct fnic_iport_s *iport, + struct fc_std_flogi *plogi_rsp) +{ + uint16_t b2b_rdf_size = + be16_to_cpu(FNIC_LOGI_RDF_SIZE(plogi_rsp->els)); + uint16_t spc3_rdf_size = + be16_to_cpu(plogi_rsp->els.fl_cssp[2].cp_rdfs) & FNIC_FC_C3_RDF; + struct fnic *fnic = iport->fnic; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "MFS: b2b_rdf_size: 0x%x spc3_rdf_size: 0x%x", + b2b_rdf_size, spc3_rdf_size); + + return min(b2b_rdf_size, spc3_rdf_size); +} + +static void fdls_send_register_fc4_types(struct fnic_iport_s *iport) +{ + uint8_t *frame; + struct fc_std_rft_id *prft_id; + struct fnic *fnic = iport->fnic; + uint16_t oxid; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_rft_id); + uint8_t fcid[3]; + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send RFT"); + return; + } + + prft_id = (struct fc_std_rft_id *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + *prft_id = (struct fc_std_rft_id) { + .fchdr = {.fh_r_ctl = FC_RCTL_DD_UNSOL_CTL, + .fh_d_id = {0xFF, 0xFF, 0xFC}, .fh_type = FC_TYPE_CT, + .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0}, + .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)}, + .fc_std_ct_hdr = {.ct_rev = FC_CT_REV, .ct_fs_type = FC_FST_DIR, + .ct_fs_subtype = FC_NS_SUBTYPE, + .ct_cmd = cpu_to_be16(FC_NS_RFT_ID)} + }; + + hton24(fcid, iport->fcid); + FNIC_STD_SET_S_ID(prft_id->fchdr, fcid); + FNIC_STD_SET_PORT_ID(prft_id->rft_id, fcid); + + oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_RFT, + &iport->active_oxid_fabric_req); + + if (oxid == FNIC_UNASSIGNED_OXID) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Failed to allocate OXID to send RFT", + iport->fcid); + mempool_free(frame, 
fnic->frame_pool); + return; + } + FNIC_STD_SET_OX_ID(prft_id->fchdr, oxid); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send RFT with oxid: 0x%x", iport->fcid, + oxid); + + prft_id->rft_id.fr_fts.ff_type_map[0] = + cpu_to_be32(1 << FC_TYPE_FCP); + + prft_id->rft_id.fr_fts.ff_type_map[1] = + cpu_to_be32(1 << (FC_TYPE_CT % FC_NS_BPW)); + + fnic_send_fcoe_frame(iport, frame, frame_size); + + /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */ + fdls_start_fabric_timer(iport, 2 * iport->e_d_tov); +} + +static void fdls_send_register_fc4_features(struct fnic_iport_s *iport) +{ + uint8_t *frame; + struct fc_std_rff_id *prff_id; + struct fnic *fnic = iport->fnic; + uint16_t oxid; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_rff_id); + uint8_t fcid[3]; + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send RFF"); + return; + } + + prff_id = (struct fc_std_rff_id *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + *prff_id = (struct fc_std_rff_id) { + .fchdr = {.fh_r_ctl = FC_RCTL_DD_UNSOL_CTL, + .fh_d_id = {0xFF, 0xFF, 0xFC}, .fh_type = FC_TYPE_CT, + .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0}, + .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)}, + .fc_std_ct_hdr = {.ct_rev = FC_CT_REV, .ct_fs_type = FC_FST_DIR, + .ct_fs_subtype = FC_NS_SUBTYPE, + .ct_cmd = cpu_to_be16(FC_NS_RFF_ID)}, + .rff_id.fr_feat = 0x2, + .rff_id.fr_type = FC_TYPE_FCP + }; + + hton24(fcid, iport->fcid); + FNIC_STD_SET_S_ID(prff_id->fchdr, fcid); + FNIC_STD_SET_PORT_ID(prff_id->rff_id, fcid); + + oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_RFF, + &iport->active_oxid_fabric_req); + + if (oxid == FNIC_UNASSIGNED_OXID) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Failed to allocate OXID to send RFF", + iport->fcid); + mempool_free(frame, fnic->frame_pool); + return; + } + FNIC_STD_SET_OX_ID(prff_id->fchdr, oxid); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send RFF with oxid: 0x%x", iport->fcid, + oxid); + + prff_id->rff_id.fr_type = FC_TYPE_FCP; + + fnic_send_fcoe_frame(iport, frame, frame_size); + + /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */ + fdls_start_fabric_timer(iport, 2 * iport->e_d_tov); +} + +static void +fdls_send_tgt_prli(struct fnic_iport_s *iport, struct fnic_tport_s *tport) +{ + uint8_t *frame; + struct fc_std_els_prli *pprli; + struct fnic *fnic = iport->fnic; + uint16_t oxid; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_els_prli); + uint8_t s_id[3]; + uint8_t d_id[3]; + uint32_t timeout; + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send TGT PRLI"); + tport->flags |= FNIC_FDLS_RETRY_FRAME; + goto err_out; + } + + pprli = (struct fc_std_els_prli *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + *pprli = (struct fc_std_els_prli) { + .fchdr = {.fh_r_ctl = FC_RCTL_ELS_REQ, .fh_type = FC_TYPE_ELS, + .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0}, + .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)}, + .els_prli = {.prli_cmd = ELS_PRLI, + .prli_spp_len = 16, + .prli_len = cpu_to_be16(0x14)}, + .sp = {.spp_type = 0x08, .spp_flags = 0x0020, + .spp_params = cpu_to_be32(0xA2)} + }; + + oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_TGT_PRLI, &tport->active_oxid); + if (oxid == FNIC_UNASSIGNED_OXID) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "0x%x: Failed to allocate 
OXID to send TGT PRLI to 0x%x", + iport->fcid, tport->fcid); + mempool_free(frame, fnic->frame_pool); + tport->flags |= FNIC_FDLS_RETRY_FRAME; + goto err_out; + } + + tport->flags &= ~FNIC_FDLS_TGT_ABORT_ISSUED; + + hton24(s_id, iport->fcid); + hton24(d_id, tport->fcid); + + FNIC_STD_SET_OX_ID(pprli->fchdr, oxid); + FNIC_STD_SET_S_ID(pprli->fchdr, s_id); + FNIC_STD_SET_D_ID(pprli->fchdr, d_id); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send PRLI to tgt: 0x%x with oxid: 0x%x", + iport->fcid, tport->fcid, oxid); + + fnic_send_fcoe_frame(iport, frame, frame_size); + atomic64_inc(&iport->iport_stats.tport_prli_sent); + +err_out: + timeout = max(2 * iport->e_d_tov, iport->plogi_timeout); + /* Even if fnic_send_fcoe_frame() fails we want to retry after timeout */ + fdls_start_tport_timer(iport, tport, timeout); +} + +/** + * fdls_send_fabric_logo - Send flogo to the fcf + * @iport: Handle to fnic iport + * + * This function does not change or check the fabric state. + * It the caller's responsibility to set the appropriate iport fabric + * state when this is called. Normally it is FDLS_STATE_FABRIC_LOGO. + * Currently this assumes to be called with fnic lock held. + */ +void fdls_send_fabric_logo(struct fnic_iport_s *iport) +{ + uint8_t *frame; + struct fc_std_logo *plogo; + struct fnic *fnic = iport->fnic; + uint8_t d_id[3]; + uint16_t oxid; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_logo); + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send fabric LOGO"); + return; + } + + plogo = (struct fc_std_logo *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + fdls_init_logo_frame(frame, iport); + + oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_LOGO, + &iport->active_oxid_fabric_req); + + if (oxid == FNIC_UNASSIGNED_OXID) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Failed to allocate OXID to send fabric LOGO", + iport->fcid); + mempool_free(frame, fnic->frame_pool); + return; + } + FNIC_STD_SET_OX_ID(plogo->fchdr, oxid); + + hton24(d_id, FC_FID_FLOGI); + FNIC_STD_SET_D_ID(plogo->fchdr, d_id); + + iport->fabric.flags &= ~FNIC_FDLS_FABRIC_ABORT_ISSUED; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send fabric LOGO with oxid: 0x%x", + iport->fcid, oxid); + + fnic_send_fcoe_frame(iport, frame, frame_size); + + fdls_start_fabric_timer(iport, 2 * iport->e_d_tov); +} + +/** + * fdls_tgt_logout - Send plogo to the remote port + * @iport: Handle to fnic iport + * @tport: Handle to remote port + * + * This function does not change or check the fabric/tport state. + * It the caller's responsibility to set the appropriate tport/fabric + * state when this is called. Normally that is fdls_tgt_state_plogo. 
+ * This could be used to send plogo to nameserver process + * also not just target processes + */ +void fdls_tgt_logout(struct fnic_iport_s *iport, struct fnic_tport_s *tport) +{ + uint8_t *frame; + struct fc_std_logo *plogo; + struct fnic *fnic = iport->fnic; + uint8_t d_id[3]; + uint16_t oxid; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_logo); + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send fabric LOGO"); + return; + } + + plogo = (struct fc_std_logo *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + fdls_init_logo_frame(frame, iport); + + oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_TGT_LOGO, &tport->active_oxid); + if (oxid == FNIC_UNASSIGNED_OXID) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Failed to allocate OXID to send tgt LOGO", + iport->fcid); + mempool_free(frame, fnic->frame_pool); + return; + } + FNIC_STD_SET_OX_ID(plogo->fchdr, oxid); + + hton24(d_id, tport->fcid); + FNIC_STD_SET_D_ID(plogo->fchdr, d_id); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send tgt LOGO with oxid: 0x%x", + iport->fcid, oxid); + + fnic_send_fcoe_frame(iport, frame, frame_size); + + atomic64_inc(&iport->iport_stats.tport_logo_sent); +} + +static void fdls_tgt_discovery_start(struct fnic_iport_s *iport) +{ + struct fnic_tport_s *tport, *next; + u32 old_link_down_cnt = iport->fnic->link_down_cnt; + struct fnic *fnic = iport->fnic; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Starting FDLS target discovery", iport->fcid); + + list_for_each_entry_safe(tport, next, &iport->tport_list, links) { + if ((old_link_down_cnt != iport->fnic->link_down_cnt) + || (iport->state != FNIC_IPORT_STATE_READY)) { + break; + } + /* if we marked the tport as deleted due to GPN_FT + * We should not send ADISC anymore + */ + if ((tport->state == FDLS_TGT_STATE_OFFLINING) || + (tport->state == FDLS_TGT_STATE_OFFLINE)) + continue; + + /* For tports which have received RSCN */ + if (tport->flags & FNIC_FDLS_TPORT_SEND_ADISC) { + tport->retry_counter = 0; + fdls_set_tport_state(tport, FDLS_TGT_STATE_ADISC); + tport->flags &= ~FNIC_FDLS_TPORT_SEND_ADISC; + fdls_send_tgt_adisc(iport, tport); + continue; + } + if (fdls_get_tport_state(tport) != FDLS_TGT_STATE_INIT) { + /* Not a new port, skip */ + continue; + } + tport->retry_counter = 0; + fdls_set_tport_state(tport, FDLS_TGT_STATE_PLOGI); + fdls_send_tgt_plogi(iport, tport); + } + fdls_set_state((&iport->fabric), FDLS_STATE_TGT_DISCOVERY); +} + +/* + * Function to restart the IT nexus if we received any out of + * sequence PLOGI/PRLI response from the target. + * The memory for the new tport structure is allocated + * inside fdls_create_tport and added to the iport's tport list. + * This will get freed later during tport_offline/linkdown + * or module unload. 
+ * The new_tport pointer can safely go out of scope,
+ * since the memory it is
+ * pointing to will be freed later.
+ */
+static void fdls_target_restart_nexus(struct fnic_tport_s *tport)
+{
+ struct fnic_iport_s *iport = tport->iport;
+ struct fnic_tport_s *new_tport = NULL;
+ uint32_t fcid;
+ uint64_t wwpn;
+ int nexus_restart_count;
+ struct fnic *fnic = iport->fnic;
+ bool retval = true;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "tport fcid: 0x%x state: %d restart_count: %d",
+ tport->fcid, tport->state, tport->nexus_restart_count);
+
+ fcid = tport->fcid;
+ wwpn = tport->wwpn;
+ nexus_restart_count = tport->nexus_restart_count;
+
+ retval = fdls_delete_tport(iport, tport);
+ if (retval != true) {
+ FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+ "Error deleting tport: 0x%x", fcid);
+ return;
+ }
+
+ if (nexus_restart_count >= FNIC_TPORT_MAX_NEXUS_RESTART) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Exceeded nexus restart retries tport: 0x%x",
+ fcid);
+ return;
+ }
+
+ /*
+ * Allocate memory for the new tport and add it to
+ * iport's tport list.
+ * This memory will be freed during tport_offline/linkdown
+ * or module unload. The pointer new_tport is safe to go
+ * out of scope when this function returns, since the memory
+ * it is pointing to is guaranteed to be freed later,
+ * as mentioned above.
+ */
+ new_tport = fdls_create_tport(iport, fcid, wwpn);
+ if (!new_tport) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Error creating new tport: 0x%x", fcid);
+ return;
+ }
+
+ new_tport->nexus_restart_count = nexus_restart_count + 1;
+ fdls_send_tgt_plogi(iport, new_tport);
+ fdls_set_tport_state(new_tport, FDLS_TGT_STATE_PLOGI);
+}
+
+struct fnic_tport_s *fnic_find_tport_by_fcid(struct fnic_iport_s *iport,
+ uint32_t fcid)
+{
+ struct fnic_tport_s *tport, *next;
+
+ list_for_each_entry_safe(tport, next, &(iport->tport_list), links) {
+ if ((tport->fcid == fcid)
+ && !(tport->flags & FNIC_FDLS_TPORT_TERMINATING))
+ return tport;
+ }
+ return NULL;
+}
+
+static struct fnic_tport_s *fdls_create_tport(struct fnic_iport_s *iport,
+ uint32_t fcid, uint64_t wwpn)
+{
+ struct fnic_tport_s *tport;
+ struct fnic *fnic = iport->fnic;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FDLS create tport: fcid: 0x%x wwpn: 0x%llx", fcid, wwpn);
+
+ tport = kzalloc(sizeof(struct fnic_tport_s), GFP_ATOMIC);
+ if (!tport) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Memory allocation failure while creating tport: 0x%x\n",
+ fcid);
+ return NULL;
+ }
+
+ tport->max_payload_size = FNIC_FCOE_MAX_FRAME_SZ;
+ tport->r_a_tov = FC_DEF_R_A_TOV;
+ tport->e_d_tov = FC_DEF_E_D_TOV;
+ tport->fcid = fcid;
+ tport->wwpn = wwpn;
+ tport->iport = iport;
+
+ FNIC_FCS_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+ "Need to setup tport timer callback");
+
+ timer_setup(&tport->retry_timer, fdls_tport_timer_callback, 0);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Added tport 0x%x", tport->fcid);
+ fdls_set_tport_state(tport, FDLS_TGT_STATE_INIT);
+ list_add_tail(&tport->links, &iport->tport_list);
+ atomic_set(&tport->in_flight, 0);
+ return tport;
+}
+
+struct fnic_tport_s *fnic_find_tport_by_wwpn(struct fnic_iport_s *iport,
+ uint64_t wwpn)
+{
+ struct fnic_tport_s *tport, *next;
+
+ list_for_each_entry_safe(tport, next, &(iport->tport_list), links) {
+ if ((tport->wwpn == wwpn)
+ && !(tport->flags & FNIC_FDLS_TPORT_TERMINATING))
+ return tport;
+ }
+ return NULL;
+}
+
+static void
+fnic_fdmi_attr_set(void *attr_start, u16 
len, + void *data, u32 *off) +{ + u16 size = len + FC_FDMI_ATTR_ENTRY_HEADER_LEN; + struct fc_fdmi_attr_entry *fdmi_attr = (struct fc_fdmi_attr_entry *) + ((u8 *)attr_start + *off); + + put_unaligned_be16(type, &fdmi_attr->type); + put_unaligned_be16(size, &fdmi_attr->len); + memcpy(fdmi_attr->value, data, len); + *off += size; +} + +static void fdls_fdmi_register_hba(struct fnic_iport_s *iport) +{ + uint8_t *frame; + struct fc_std_fdmi_rhba *prhba; + struct fc_fdmi_attr_entry *fdmi_attr; + uint8_t fcid[3]; + int err; + struct fnic *fnic = iport->fnic; + struct vnic_devcmd_fw_info *fw_info = NULL; + uint16_t oxid; + u32 attr_off_bytes, len; + u8 data[64]; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET; + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send FDMI RHBA"); + return; + } + + prhba = (struct fc_std_fdmi_rhba *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + *prhba = (struct fc_std_fdmi_rhba) { + .fchdr = { + .fh_r_ctl = FC_RCTL_DD_UNSOL_CTL, + .fh_d_id = {0xFF, 0XFF, 0XFA}, + .fh_type = FC_TYPE_CT, + .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0}, + .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID) + }, + .fc_std_ct_hdr = { + .ct_rev = FC_CT_REV, .ct_fs_type = FC_FST_MGMT, + .ct_fs_subtype = FC_FDMI_SUBTYPE, + .ct_cmd = cpu_to_be16(FC_FDMI_RHBA) + }, + }; + + hton24(fcid, iport->fcid); + FNIC_STD_SET_S_ID(prhba->fchdr, fcid); + + oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FDMI_RHBA, + &iport->active_oxid_fdmi_rhba); + + if (oxid == FNIC_UNASSIGNED_OXID) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Failed to allocate OXID to send FDMI RHBA", + iport->fcid); + mempool_free(frame, fnic->frame_pool); + return; + } + FNIC_STD_SET_OX_ID(prhba->fchdr, oxid); + + put_unaligned_be64(iport->wwpn, &prhba->rhba.hbaid.id); + put_unaligned_be32(FNIC_FDMI_NUM_PORTS, &prhba->rhba.port.numport); + put_unaligned_be64(iport->wwpn, &prhba->rhba.port.port[0].portname); + put_unaligned_be32(FNIC_FDMI_NUM_HBA_ATTRS, + &prhba->rhba.hba_attrs.numattrs); + + fdmi_attr = prhba->rhba.hba_attrs.attr; + attr_off_bytes = 0; + + put_unaligned_be64(iport->wwnn, data); + fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_NODE_NAME, + FNIC_FDMI_NN_LEN, data, &attr_off_bytes); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "NN set, off=%d", attr_off_bytes); + + strscpy_pad(data, FNIC_FDMI_MANUFACTURER, FNIC_FDMI_MANU_LEN); + fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_MANUFACTURER, + FNIC_FDMI_MANU_LEN, data, &attr_off_bytes); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "MFG set <%s>, off=%d", data, attr_off_bytes); + + err = vnic_dev_fw_info(fnic->vdev, &fw_info); + if (!err) { + strscpy_pad(data, fw_info->hw_serial_number, + FNIC_FDMI_SERIAL_LEN); + fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_SERIAL_NUMBER, + FNIC_FDMI_SERIAL_LEN, data, &attr_off_bytes); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "SERIAL set <%s>, off=%d", data, attr_off_bytes); + + } + + if (fnic->subsys_desc_len >= FNIC_FDMI_MODEL_LEN) + fnic->subsys_desc_len = FNIC_FDMI_MODEL_LEN - 1; + strscpy_pad(data, fnic->subsys_desc, FNIC_FDMI_MODEL_LEN); + data[FNIC_FDMI_MODEL_LEN - 1] = 0; + fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_MODEL, FNIC_FDMI_MODEL_LEN, + data, &attr_off_bytes); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "MODEL set <%s>, off=%d", data, attr_off_bytes); + + strscpy_pad(data, FNIC_FDMI_MODEL_DESCRIPTION, FNIC_FDMI_MODEL_DES_LEN); + fnic_fdmi_attr_set(fdmi_attr, 
FNIC_FDMI_TYPE_MODEL_DES, + FNIC_FDMI_MODEL_DES_LEN, data, &attr_off_bytes); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "MODEL_DESC set <%s>, off=%d", data, attr_off_bytes); + + if (!err) { + strscpy_pad(data, fw_info->hw_version, FNIC_FDMI_HW_VER_LEN); + fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_HARDWARE_VERSION, + FNIC_FDMI_HW_VER_LEN, data, &attr_off_bytes); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "HW_VER set <%s>, off=%d", data, attr_off_bytes); + + } + + strscpy_pad(data, DRV_VERSION, FNIC_FDMI_DR_VER_LEN); + fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_DRIVER_VERSION, + FNIC_FDMI_DR_VER_LEN, data, &attr_off_bytes); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "DRV_VER set <%s>, off=%d", data, attr_off_bytes); + + strscpy_pad(data, "N/A", FNIC_FDMI_ROM_VER_LEN); + fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_ROM_VERSION, + FNIC_FDMI_ROM_VER_LEN, data, &attr_off_bytes); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "ROM_VER set <%s>, off=%d", data, attr_off_bytes); + + if (!err) { + strscpy_pad(data, fw_info->fw_version, FNIC_FDMI_FW_VER_LEN); + fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_FIRMWARE_VERSION, + FNIC_FDMI_FW_VER_LEN, data, &attr_off_bytes); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FW_VER set <%s>, off=%d", data, attr_off_bytes); + } + + len = sizeof(struct fc_std_fdmi_rhba) + attr_off_bytes; + frame_size += len; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send FDMI RHBA with oxid: 0x%x fs: %d", iport->fcid, + oxid, frame_size); + + fnic_send_fcoe_frame(iport, frame, frame_size); + iport->fabric.fdmi_pending |= FDLS_FDMI_REG_HBA_PENDING; +} + +static void fdls_fdmi_register_pa(struct fnic_iport_s *iport) +{ + uint8_t *frame; + struct fc_std_fdmi_rpa *prpa; + struct fc_fdmi_attr_entry *fdmi_attr; + uint8_t fcid[3]; + struct fnic *fnic = iport->fnic; + u32 port_speed_bm; + u32 port_speed = vnic_dev_port_speed(fnic->vdev); + uint16_t oxid; + u32 attr_off_bytes, len; + u8 tmp_data[16], data[64]; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET; + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send FDMI RPA"); + return; + } + + prpa = (struct fc_std_fdmi_rpa *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + *prpa = (struct fc_std_fdmi_rpa) { + .fchdr = { + .fh_r_ctl = FC_RCTL_DD_UNSOL_CTL, + .fh_d_id = {0xFF, 0xFF, 0xFA}, + .fh_type = FC_TYPE_CT, + .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0}, + .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID) + }, + .fc_std_ct_hdr = { + .ct_rev = FC_CT_REV, .ct_fs_type = FC_FST_MGMT, + .ct_fs_subtype = FC_FDMI_SUBTYPE, + .ct_cmd = cpu_to_be16(FC_FDMI_RPA) + }, + }; + + hton24(fcid, iport->fcid); + FNIC_STD_SET_S_ID(prpa->fchdr, fcid); + + oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FDMI_RPA, + &iport->active_oxid_fdmi_rpa); + + if (oxid == FNIC_UNASSIGNED_OXID) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Failed to allocate OXID to send FDMI RPA", + iport->fcid); + mempool_free(frame, fnic->frame_pool); + return; + } + FNIC_STD_SET_OX_ID(prpa->fchdr, oxid); + + put_unaligned_be64(iport->wwpn, &prpa->rpa.port.portname); + put_unaligned_be32(FNIC_FDMI_NUM_PORT_ATTRS, + &prpa->rpa.hba_attrs.numattrs); + + /* MDS does not support GIGE speed. + * Bit shift standard definitions from scsi_transport_fc.h to + * match FC spec. 
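+ * For example, DCEM_PORTSPEED_10G maps to FC_PORTSPEED_10GBIT shifted
+ * up to bit 14 of the RPA speed bitmap, and unknown speeds fall back
+ * to the 1 Gbit bit (bit 15) in the default case below.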
+ */ + switch (port_speed) { + case DCEM_PORTSPEED_10G: + case DCEM_PORTSPEED_20G: + /* There is no bit for 20G */ + port_speed_bm = FC_PORTSPEED_10GBIT << PORT_SPEED_BIT_14; + break; + case DCEM_PORTSPEED_25G: + port_speed_bm = FC_PORTSPEED_25GBIT << PORT_SPEED_BIT_8; + break; + case DCEM_PORTSPEED_40G: + case DCEM_PORTSPEED_4x10G: + port_speed_bm = FC_PORTSPEED_40GBIT << PORT_SPEED_BIT_9; + break; + case DCEM_PORTSPEED_100G: + port_speed_bm = FC_PORTSPEED_100GBIT << PORT_SPEED_BIT_8; + break; + default: + port_speed_bm = FC_PORTSPEED_1GBIT << PORT_SPEED_BIT_15; + break; + } + attr_off_bytes = 0; + + fdmi_attr = prpa->rpa.hba_attrs.attr; + + put_unaligned_be64(iport->wwnn, data); + + memset(data, 0, FNIC_FDMI_FC4_LEN); + data[2] = 1; + fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_FC4_TYPES, + FNIC_FDMI_FC4_LEN, data, &attr_off_bytes); + + put_unaligned_be32(port_speed_bm, data); + fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_SUPPORTED_SPEEDS, + FNIC_FDMI_SUPP_SPEED_LEN, data, &attr_off_bytes); + + put_unaligned_be32(port_speed_bm, data); + fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_CURRENT_SPEED, + FNIC_FDMI_CUR_SPEED_LEN, data, &attr_off_bytes); + + put_unaligned_be32(FNIC_FDMI_MFS, data); + fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_MAX_FRAME_SIZE, + FNIC_FDMI_MFS_LEN, data, &attr_off_bytes); + + snprintf(tmp_data, FNIC_FDMI_OS_NAME_LEN - 1, "host%d", + fnic->host->host_no); + strscpy_pad(data, tmp_data, FNIC_FDMI_OS_NAME_LEN); + data[FNIC_FDMI_OS_NAME_LEN - 1] = 0; + fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_OS_NAME, + FNIC_FDMI_OS_NAME_LEN, data, &attr_off_bytes); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "OS name set <%s>, off=%d", data, attr_off_bytes); + + sprintf(fc_host_system_hostname(fnic->host), "%s", utsname()->nodename); + strscpy_pad(data, fc_host_system_hostname(fnic->host), + FNIC_FDMI_HN_LEN); + data[FNIC_FDMI_HN_LEN - 1] = 0; + fnic_fdmi_attr_set(fdmi_attr, FNIC_FDMI_TYPE_HOST_NAME, + FNIC_FDMI_HN_LEN, data, &attr_off_bytes); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Host name set <%s>, off=%d", data, attr_off_bytes); + + len = sizeof(struct fc_std_fdmi_rpa) + attr_off_bytes; + frame_size += len; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send FDMI RPA with oxid: 0x%x fs: %d", iport->fcid, + oxid, frame_size); + + fnic_send_fcoe_frame(iport, frame, frame_size); + iport->fabric.fdmi_pending |= FDLS_FDMI_RPA_PENDING; +} + +void fdls_fabric_timer_callback(struct timer_list *t) +{ + struct fnic_fdls_fabric_s *fabric = from_timer(fabric, t, retry_timer); + struct fnic_iport_s *iport = + container_of(fabric, struct fnic_iport_s, fabric); + struct fnic *fnic = iport->fnic; + unsigned long flags; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "tp: %d fab state: %d fab retry counter: %d max_flogi_retries: %d", + iport->fabric.timer_pending, iport->fabric.state, + iport->fabric.retry_counter, iport->max_flogi_retries); + + spin_lock_irqsave(&fnic->fnic_lock, flags); + + if (!iport->fabric.timer_pending) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + + if (iport->fabric.del_timer_inprogress) { + iport->fabric.del_timer_inprogress = 0; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fabric_del_timer inprogress(%d). 
Skip timer cb",
+ iport->fabric.del_timer_inprogress);
+ return;
+ }
+
+ iport->fabric.timer_pending = 0;
+
+ /* The fabric state indicates which frames have timed out; we retry them */
+ switch (iport->fabric.state) {
+ case FDLS_STATE_FABRIC_FLOGI:
+ /* FLOGI received an LS_RJT with busy; we retry from here */
+ if ((iport->fabric.flags & FNIC_FDLS_RETRY_FRAME)
+ && (iport->fabric.retry_counter < iport->max_flogi_retries)) {
+ iport->fabric.flags &= ~FNIC_FDLS_RETRY_FRAME;
+ fdls_send_fabric_flogi(iport);
+ } else if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED)) {
+ /* FLOGI has timed out (2 * e_d_tov); send an ABTS */
+ fdls_send_fabric_abts(iport);
+ } else {
+ /* ABTS has timed out.
+ * Mark the OXID to be freed after 2 * r_a_tov and retry the request.
+ */
+ fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req);
+ if (iport->fabric.retry_counter < iport->max_flogi_retries) {
+ iport->fabric.flags &= ~FNIC_FDLS_FABRIC_ABORT_ISSUED;
+ fdls_send_fabric_flogi(iport);
+ } else
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Exceeded max FLOGI retries");
+ }
+ break;
+ case FDLS_STATE_FABRIC_PLOGI:
+ /* PLOGI received an LS_RJT with busy; we retry from here */
+ if ((iport->fabric.flags & FNIC_FDLS_RETRY_FRAME)
+ && (iport->fabric.retry_counter < iport->max_plogi_retries)) {
+ iport->fabric.flags &= ~FNIC_FDLS_RETRY_FRAME;
+ fdls_send_fabric_plogi(iport);
+ } else if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED)) {
+ /* PLOGI has timed out (2 * e_d_tov); send an ABTS */
+ fdls_send_fabric_abts(iport);
+ } else {
+ /* ABTS has timed out.
+ * Mark the OXID to be freed after 2 * r_a_tov and retry the request.
+ */
+ fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req);
+ if (iport->fabric.retry_counter < iport->max_plogi_retries) {
+ iport->fabric.flags &= ~FNIC_FDLS_FABRIC_ABORT_ISSUED;
+ fdls_send_fabric_plogi(iport);
+ } else
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Exceeded max PLOGI retries");
+ }
+ break;
+ case FDLS_STATE_RPN_ID:
+ /* RPN_ID received an LS_RJT with busy; we retry from here */
+ if ((iport->fabric.flags & FNIC_FDLS_RETRY_FRAME)
+ && (iport->fabric.retry_counter < FDLS_RETRY_COUNT)) {
+ iport->fabric.flags &= ~FNIC_FDLS_RETRY_FRAME;
+ fdls_send_rpn_id(iport);
+ } else if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED))
+ /* RPN_ID has timed out. Send an ABTS */
+ fdls_send_fabric_abts(iport);
+ else {
+ /* ABTS has timed out */
+ fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req);
+ fnic_fdls_start_plogi(iport); /* go back to fabric PLOGI */
+ }
+ break;
+ case FDLS_STATE_SCR:
+ /* SCR received an LS_RJT with busy; we retry from here */
+ if ((iport->fabric.flags & FNIC_FDLS_RETRY_FRAME)
+ && (iport->fabric.retry_counter < FDLS_RETRY_COUNT)) {
+ iport->fabric.flags &= ~FNIC_FDLS_RETRY_FRAME;
+ fdls_send_scr(iport);
+ } else if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED))
+ /* SCR has timed out. Send an ABTS */
+ fdls_send_fabric_abts(iport);
+ else {
+ /* ABTS has timed out */
+ fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "ABTS timed out. Starting PLOGI: %p", iport);
+ fnic_fdls_start_plogi(iport);
+ }
+ break;
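+ /* The name server registration cases below follow the same pattern
+ * as SCR: retry on a busy LS_RJT, send an ABTS on timeout, and fall
+ * back to fabric PLOGI if the ABTS also times out. */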
+ case FDLS_STATE_REGISTER_FC4_TYPES:
+ /* RFT_ID received an LS_RJT with busy; we retry from here */
+ if ((iport->fabric.flags & FNIC_FDLS_RETRY_FRAME)
+ && (iport->fabric.retry_counter < FDLS_RETRY_COUNT)) {
+ iport->fabric.flags &= ~FNIC_FDLS_RETRY_FRAME;
+ fdls_send_register_fc4_types(iport);
+ } else if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED)) {
+ /* RFT_ID has timed out; send an ABTS */
+ fdls_send_fabric_abts(iport);
+ } else {
+ /* ABTS has timed out */
+ fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "ABTS timed out. Starting PLOGI: %p", iport);
+ fnic_fdls_start_plogi(iport); /* go back to fabric PLOGI */
+ }
+ break;
+ case FDLS_STATE_REGISTER_FC4_FEATURES:
+ /* RFF_ID received an LS_RJT with busy; we retry from here */
+ if ((iport->fabric.flags & FNIC_FDLS_RETRY_FRAME)
+ && (iport->fabric.retry_counter < FDLS_RETRY_COUNT)) {
+ iport->fabric.flags &= ~FNIC_FDLS_RETRY_FRAME;
+ fdls_send_register_fc4_features(iport);
+ } else if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED))
+ /* RFF_ID has timed out. Send an ABTS */
+ fdls_send_fabric_abts(iport);
+ else {
+ /* ABTS has timed out */
+ fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "ABTS timed out. Starting PLOGI %p", iport);
+ fnic_fdls_start_plogi(iport); /* go back to fabric PLOGI */
+ }
+ break;
+ case FDLS_STATE_RSCN_GPN_FT:
+ case FDLS_STATE_SEND_GPNFT:
+ case FDLS_STATE_GPN_FT:
+ /* GPN_FT received an LS_RJT with busy; we retry from here */
+ if ((iport->fabric.flags & FNIC_FDLS_RETRY_FRAME)
+ && (iport->fabric.retry_counter < FDLS_RETRY_COUNT)) {
+ iport->fabric.flags &= ~FNIC_FDLS_RETRY_FRAME;
+ fdls_send_gpn_ft(iport, iport->fabric.state);
+ } else if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED)) {
+ /* GPN_FT has timed out. Send an ABTS */
+ fdls_send_fabric_abts(iport);
+ } else {
+ /* ABTS has timed out */
+ fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req);
+ if (iport->fabric.retry_counter < FDLS_RETRY_COUNT) {
+ fdls_send_gpn_ft(iport, iport->fabric.state);
+ } else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "ABTS timeout for fabric GPN_FT. Check name server: %p",
+ iport);
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+}
+
+void fdls_fdmi_timer_callback(struct timer_list *t)
+{
+ struct fnic_fdls_fabric_s *fabric = from_timer(fabric, t, fdmi_timer);
+ struct fnic_iport_s *iport =
+ container_of(fabric, struct fnic_iport_s, fabric);
+ struct fnic *fnic = iport->fnic;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fdmi timer callback : 0x%x\n", iport->fabric.fdmi_pending);
+
+ if (!iport->fabric.fdmi_pending) {
+ /* timer expired after fdmi responses received. */
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ return;
+ }
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fdmi timer callback : 0x%x\n", iport->fabric.fdmi_pending);
+
+ /* If no abort is pending, send an abort */
+ if (!(iport->fabric.fdmi_pending & FDLS_FDMI_ABORT_PENDING)) {
+ fdls_send_fdmi_abts(iport);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ return;
+ }
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fdmi timer callback : 0x%x\n", iport->fabric.fdmi_pending);
+
+ /* An ABTS is already pending for an active FDMI request.
+ * That means the FDMI ABTS timed out.
+ * Schedule to free the OXID after 2 * r_a_tov and proceed.
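+ * (The delayed free keeps the OXID quarantined so that a late
+ * response cannot be matched against a recycled exchange id.)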
+ */
+ if (iport->fabric.fdmi_pending & FDLS_FDMI_PLOGI_PENDING) {
+ fdls_schedule_oxid_free(iport, &iport->active_oxid_fdmi_plogi);
+ } else {
+ if (iport->fabric.fdmi_pending & FDLS_FDMI_REG_HBA_PENDING)
+ fdls_schedule_oxid_free(iport, &iport->active_oxid_fdmi_rhba);
+ if (iport->fabric.fdmi_pending & FDLS_FDMI_RPA_PENDING)
+ fdls_schedule_oxid_free(iport, &iport->active_oxid_fdmi_rpa);
+ }
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fdmi timer callback : 0x%x\n", iport->fabric.fdmi_pending);
+
+ iport->fabric.fdmi_pending = 0;
+ /* If max retries are not exhausted, start over from FDMI PLOGI */
+ if (iport->fabric.fdmi_retry < FDLS_FDMI_MAX_RETRY) {
+ iport->fabric.fdmi_retry++;
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "retry fdmi timer %d", iport->fabric.fdmi_retry);
+ fdls_send_fdmi_plogi(iport);
+ }
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "fdmi timer callback : 0x%x\n", iport->fabric.fdmi_pending);
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+}
+
+static void fdls_send_delete_tport_msg(struct fnic_tport_s *tport)
+{
+ struct fnic_iport_s *iport = (struct fnic_iport_s *) tport->iport;
+ struct fnic *fnic = iport->fnic;
+ struct fnic_tport_event_s *tport_del_evt;
+
+ tport_del_evt = kzalloc(sizeof(struct fnic_tport_event_s), GFP_ATOMIC);
+ if (!tport_del_evt) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Failed to allocate memory for tport event fcid: 0x%x",
+ tport->fcid);
+ return;
+ }
+ tport_del_evt->event = TGT_EV_TPORT_DELETE;
+ tport_del_evt->arg1 = (void *) tport;
+ list_add_tail(&tport_del_evt->links, &fnic->tport_event_list);
+ queue_work(fnic_event_queue, &fnic->tport_work);
+}
+
+static void fdls_tport_timer_callback(struct timer_list *t)
+{
+ struct fnic_tport_s *tport = from_timer(tport, t, retry_timer);
+ struct fnic_iport_s *iport = (struct fnic_iport_s *) tport->iport;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (!tport->timer_pending) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ return;
+ }
+
+ if (iport->state != FNIC_IPORT_STATE_READY) {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ return;
+ }
+
+ if (tport->del_timer_inprogress) {
+ tport->del_timer_inprogress = 0;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "tport_del_timer inprogress. Skip timer cb tport fcid: 0x%x\n",
+ tport->fcid);
+ return;
+ }
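+ /* Past this point this timer run is consumed; the retry paths below
+ * re-arm it via fdls_start_tport_timer() when another frame is sent. */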
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "tport fcid: 0x%x timer pending: %d state: %d retry counter: %d",
+ tport->fcid, tport->timer_pending, tport->state,
+ tport->retry_counter);
+
+ tport->timer_pending = 0;
+ oxid = tport->active_oxid;
+
+ /* We retry PLOGI/PRLI/ADISC frames depending on the tport state */
+ switch (tport->state) {
+ case FDLS_TGT_STATE_PLOGI:
+ /* PLOGI frame received an LS_RJT with busy; we retry from here */
+ if ((tport->flags & FNIC_FDLS_RETRY_FRAME)
+ && (tport->retry_counter < iport->max_plogi_retries)) {
+ tport->flags &= ~FNIC_FDLS_RETRY_FRAME;
+ fdls_send_tgt_plogi(iport, tport);
+ } else if (!(tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED)) {
+ /* PLOGI frame has timed out; send an ABTS */
+ fdls_send_tport_abts(iport, tport);
+ } else if (tport->retry_counter < iport->max_plogi_retries) {
+ /*
+ * ABTS has timed out
+ */
+ fdls_schedule_oxid_free(iport, &tport->active_oxid);
+ fdls_send_tgt_plogi(iport, tport);
+ } else {
+ /* exceeded PLOGI retry count */
+ fdls_schedule_oxid_free(iport, &tport->active_oxid);
+ fdls_send_delete_tport_msg(tport);
+ }
+ break;
+ case FDLS_TGT_STATE_PRLI:
+ /* PRLI received an LS_RJT with busy; hence we retry from here */
+ if ((tport->flags & FNIC_FDLS_RETRY_FRAME)
+ && (tport->retry_counter < FDLS_RETRY_COUNT)) {
+ tport->flags &= ~FNIC_FDLS_RETRY_FRAME;
+ fdls_send_tgt_prli(iport, tport);
+ } else if (!(tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED)) {
+ /* PRLI has timed out; send an ABTS */
+ fdls_send_tport_abts(iport, tport);
+ } else {
+ /* ABTS has timed out for PRLI; we go back to PLOGI */
+ fdls_schedule_oxid_free(iport, &tport->active_oxid);
+ fdls_send_tgt_plogi(iport, tport);
+ fdls_set_tport_state(tport, FDLS_TGT_STATE_PLOGI);
+ }
+ break;
+ case FDLS_TGT_STATE_ADISC:
+ /* ADISC timed out; send an ABTS */
+ if (!(tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED)) {
+ fdls_send_tport_abts(iport, tport);
+ } else if ((tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED)
+ && (tport->retry_counter < FDLS_RETRY_COUNT)) {
+ /*
+ * ABTS has timed out
+ */
+ fdls_schedule_oxid_free(iport, &tport->active_oxid);
+ fdls_send_tgt_adisc(iport, tport);
+ } else {
+ /* exceeded retry count */
+ fdls_schedule_oxid_free(iport, &tport->active_oxid);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "ADISC not responding. Deleting target port: 0x%x",
+ tport->fcid);
+ fdls_send_delete_tport_msg(tport);
+ }
+ break;
+ default:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "oxid: 0x%x Unknown tport state: 0x%x", oxid, tport->state);
+ break;
+ }
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+}
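+
+/* Entry point for fabric login: reset the retry state, send the first
+ * FLOGI and move the fabric state machine to FDLS_STATE_FABRIC_FLOGI. */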
+static void fnic_fdls_start_flogi(struct fnic_iport_s *iport)
+{
+ iport->fabric.retry_counter = 0;
+ fdls_send_fabric_flogi(iport);
+ fdls_set_state((&iport->fabric), FDLS_STATE_FABRIC_FLOGI);
+ iport->fabric.flags = 0;
+}
+
+static void fnic_fdls_start_plogi(struct fnic_iport_s *iport)
+{
+ iport->fabric.retry_counter = 0;
+ fdls_send_fabric_plogi(iport);
+ fdls_set_state((&iport->fabric), FDLS_STATE_FABRIC_PLOGI);
+ iport->fabric.flags &= ~FNIC_FDLS_FABRIC_ABORT_ISSUED;
+
+ if ((fnic_fdmi_support == 1) && (!(iport->flags & FNIC_FDMI_ACTIVE))) {
+ /* we can do FDMI at the same time */
+ iport->fabric.fdmi_retry = 0;
+ timer_setup(&iport->fabric.fdmi_timer, fdls_fdmi_timer_callback,
+ 0);
+ fdls_send_fdmi_plogi(iport);
+ iport->flags |= FNIC_FDMI_ACTIVE;
+ }
+}
+
+static void
+fdls_process_tgt_adisc_rsp(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ uint32_t tgt_fcid;
+ struct fnic_tport_s *tport;
+ uint8_t *fcid;
+ uint64_t frame_wwnn;
+ uint64_t frame_wwpn;
+ uint16_t oxid;
+ struct fc_std_els_adisc *adisc_rsp = (struct fc_std_els_adisc *)fchdr;
+ struct fc_std_els_rjt_rsp *els_rjt = (struct fc_std_els_rjt_rsp *)fchdr;
+ struct fnic *fnic = iport->fnic;
+
+ fcid = FNIC_STD_GET_S_ID(fchdr);
+ tgt_fcid = ntoh24(fcid);
+ tport = fnic_find_tport_by_fcid(iport, tgt_fcid);
+
+ if (!tport) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Tgt ADISC response tport not found: 0x%x", tgt_fcid);
+ return;
+ }
+ if ((iport->state != FNIC_IPORT_STATE_READY)
+ || (tport->state != FDLS_TGT_STATE_ADISC)
+ || (tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Dropping this ADISC response");
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "iport state: %d tport state: %d Is abort issued on ADISC? %d",
+ iport->state, tport->state,
+ (tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED));
+ return;
+ }
+ if (FNIC_STD_GET_OX_ID(fchdr) != tport->active_oxid) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Dropping frame from target: 0x%x",
+ tgt_fcid);
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Reason: Stale ADISC/Aborted ADISC/OOO frame delivery");
+ return;
+ }
+
+ oxid = FNIC_STD_GET_OX_ID(fchdr);
+ fdls_free_oxid(iport, oxid, &tport->active_oxid);
+
+ switch (adisc_rsp->els.adisc_cmd) {
+ case ELS_LS_ACC:
+ atomic64_inc(&iport->iport_stats.tport_adisc_ls_accepts);
+ if (tport->timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "tport 0x%p Canceling fabric disc timer\n",
+ tport);
+ fnic_del_tport_timer_sync(fnic, tport);
+ }
+ tport->timer_pending = 0;
+ tport->retry_counter = 0;
+ frame_wwnn = get_unaligned_be64(&adisc_rsp->els.adisc_wwnn);
+ frame_wwpn = get_unaligned_be64(&adisc_rsp->els.adisc_wwpn);
+ if ((frame_wwnn == tport->wwnn) && (frame_wwpn == tport->wwpn)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "ADISC accepted from target: 0x%x. 
Target logged in", + tgt_fcid); + fdls_set_tport_state(tport, FDLS_TGT_STATE_READY); + } else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Error mismatch frame: ADISC"); + } + break; + + case ELS_LS_RJT: + atomic64_inc(&iport->iport_stats.tport_adisc_ls_rejects); + if (((els_rjt->rej.er_reason == ELS_RJT_BUSY) + || (els_rjt->rej.er_reason == ELS_RJT_UNAB)) + && (tport->retry_counter < FDLS_RETRY_COUNT)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "ADISC ret ELS_LS_RJT BUSY. Retry from timer routine: 0x%x", + tgt_fcid); + + /* Retry ADISC again from the timer routine. */ + tport->flags |= FNIC_FDLS_RETRY_FRAME; + } else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "ADISC returned ELS_LS_RJT from target: 0x%x", + tgt_fcid); + fdls_delete_tport(iport, tport); + } + break; + } +} +static void +fdls_process_tgt_plogi_rsp(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr) +{ + uint32_t tgt_fcid; + struct fnic_tport_s *tport; + uint8_t *fcid; + uint16_t oxid; + struct fc_std_flogi *plogi_rsp = (struct fc_std_flogi *)fchdr; + struct fc_std_els_rjt_rsp *els_rjt = (struct fc_std_els_rjt_rsp *)fchdr; + uint16_t max_payload_size; + struct fnic *fnic = iport->fnic; + + fcid = FNIC_STD_GET_S_ID(fchdr); + tgt_fcid = ntoh24(fcid); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FDLS processing target PLOGI response: tgt_fcid: 0x%x", + tgt_fcid); + + tport = fnic_find_tport_by_fcid(iport, tgt_fcid); + if (!tport) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "tport not found: 0x%x", tgt_fcid); + return; + } + if ((iport->state != FNIC_IPORT_STATE_READY) + || (tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Dropping frame! iport state: %d tport state: %d", + iport->state, tport->state); + return; + } + + if (tport->state != FDLS_TGT_STATE_PLOGI) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "PLOGI rsp recvd in wrong state. Drop the frame and restart nexus"); + fdls_target_restart_nexus(tport); + return; + } + + if (FNIC_STD_GET_OX_ID(fchdr) != tport->active_oxid) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "PLOGI response from target: 0x%x. Dropping frame", + tgt_fcid); + return; + } + + oxid = FNIC_STD_GET_OX_ID(fchdr); + fdls_free_oxid(iport, oxid, &tport->active_oxid); + + switch (plogi_rsp->els.fl_cmd) { + case ELS_LS_ACC: + atomic64_inc(&iport->iport_stats.tport_plogi_ls_accepts); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "PLOGI accepted by target: 0x%x", tgt_fcid); + break; + + case ELS_LS_RJT: + atomic64_inc(&iport->iport_stats.tport_plogi_ls_rejects); + if (((els_rjt->rej.er_reason == ELS_RJT_BUSY) + || (els_rjt->rej.er_reason == ELS_RJT_UNAB)) + && (tport->retry_counter < iport->max_plogi_retries)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "PLOGI ret ELS_LS_RJT BUSY. Retry from timer routine: 0x%x", + tgt_fcid); + /* Retry plogi again from the timer routine. 
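(FNIC_FDLS_RETRY_FRAME makes fdls_tport_timer_callback() re-send the PLOGI.)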
*/ + tport->flags |= FNIC_FDLS_RETRY_FRAME; + return; + } + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "PLOGI returned ELS_LS_RJT from target: 0x%x", + tgt_fcid); + fdls_delete_tport(iport, tport); + return; + + default: + atomic64_inc(&iport->iport_stats.tport_plogi_misc_rejects); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "PLOGI not accepted from target fcid: 0x%x", + tgt_fcid); + return; + } + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Found the PLOGI target: 0x%x and state: %d", + (unsigned int) tgt_fcid, tport->state); + + if (tport->timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "tport fcid 0x%x: Canceling disc timer\n", + tport->fcid); + fnic_del_tport_timer_sync(fnic, tport); + } + + tport->timer_pending = 0; + tport->wwpn = get_unaligned_be64(&FNIC_LOGI_PORT_NAME(plogi_rsp->els)); + tport->wwnn = get_unaligned_be64(&FNIC_LOGI_NODE_NAME(plogi_rsp->els)); + + /* Learn the Service Params */ + + /* Max frame size - choose the lowest */ + max_payload_size = fnic_fc_plogi_rsp_rdf(iport, plogi_rsp); + tport->max_payload_size = + min(max_payload_size, iport->max_payload_size); + + if (tport->max_payload_size < FNIC_MIN_DATA_FIELD_SIZE) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "MFS: tport max frame size below spec bounds: %d", + tport->max_payload_size); + tport->max_payload_size = FNIC_MIN_DATA_FIELD_SIZE; + } + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "MAX frame size: %u iport max_payload_size: %d tport mfs: %d", + max_payload_size, iport->max_payload_size, + tport->max_payload_size); + + tport->max_concur_seqs = FNIC_FC_PLOGI_RSP_CONCUR_SEQ(plogi_rsp); + + tport->retry_counter = 0; + fdls_set_tport_state(tport, FDLS_TGT_STATE_PRLI); + fdls_send_tgt_prli(iport, tport); +} +static void +fdls_process_tgt_prli_rsp(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr) +{ + uint32_t tgt_fcid; + struct fnic_tport_s *tport; + uint8_t *fcid; + uint16_t oxid; + struct fc_std_els_prli *prli_rsp = (struct fc_std_els_prli *)fchdr; + struct fc_std_els_rjt_rsp *els_rjt = (struct fc_std_els_rjt_rsp *)fchdr; + struct fnic_tport_event_s *tport_add_evt; + struct fnic *fnic = iport->fnic; + bool mismatched_tgt = false; + + fcid = FNIC_STD_GET_S_ID(fchdr); + tgt_fcid = ntoh24(fcid); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FDLS process tgt PRLI response: 0x%x", tgt_fcid); + + tport = fnic_find_tport_by_fcid(iport, tgt_fcid); + if (!tport) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "tport not found: 0x%x", tgt_fcid); + /* Handle or just drop? */ + return; + } + + if ((iport->state != FNIC_IPORT_STATE_READY) + || (tport->flags & FNIC_FDLS_TGT_ABORT_ISSUED)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Dropping frame! iport st: %d tport st: %d tport fcid: 0x%x", + iport->state, tport->state, tport->fcid); + return; + } + + if (tport->state != FDLS_TGT_STATE_PRLI) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "PRLI rsp recvd in wrong state. Drop frame. 
Restarting nexus"); + fdls_target_restart_nexus(tport); + return; + } + + if (FNIC_STD_GET_OX_ID(fchdr) != tport->active_oxid) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Dropping PRLI response from target: 0x%x ", + tgt_fcid); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Reason: Stale PRLI response/Aborted PDISC/OOO frame delivery"); + return; + } + + oxid = FNIC_STD_GET_OX_ID(fchdr); + fdls_free_oxid(iport, oxid, &tport->active_oxid); + + switch (prli_rsp->els_prli.prli_cmd) { + case ELS_LS_ACC: + atomic64_inc(&iport->iport_stats.tport_prli_ls_accepts); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "PRLI accepted from target: 0x%x", tgt_fcid); + + if (prli_rsp->sp.spp_type != FC_FC4_TYPE_SCSI) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "mismatched target zoned with FC SCSI initiator: 0x%x", + tgt_fcid); + mismatched_tgt = true; + } + if (mismatched_tgt) { + fdls_tgt_logout(iport, tport); + fdls_delete_tport(iport, tport); + return; + } + break; + case ELS_LS_RJT: + atomic64_inc(&iport->iport_stats.tport_prli_ls_rejects); + if (((els_rjt->rej.er_reason == ELS_RJT_BUSY) + || (els_rjt->rej.er_reason == ELS_RJT_UNAB)) + && (tport->retry_counter < FDLS_RETRY_COUNT)) { + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "PRLI ret ELS_LS_RJT BUSY. Retry from timer routine: 0x%x", + tgt_fcid); + + /*Retry Plogi again from the timer routine. */ + tport->flags |= FNIC_FDLS_RETRY_FRAME; + return; + } + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "PRLI returned ELS_LS_RJT from target: 0x%x", + tgt_fcid); + + fdls_tgt_logout(iport, tport); + fdls_delete_tport(iport, tport); + return; + default: + atomic64_inc(&iport->iport_stats.tport_prli_misc_rejects); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "PRLI not accepted from target: 0x%x", tgt_fcid); + return; + } + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Found the PRLI target: 0x%x and state: %d", + (unsigned int) tgt_fcid, tport->state); + + if (tport->timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "tport fcid 0x%x: Canceling disc timer\n", + tport->fcid); + fnic_del_tport_timer_sync(fnic, tport); + } + tport->timer_pending = 0; + + /* Learn Service Params */ + tport->fcp_csp = be32_to_cpu(prli_rsp->sp.spp_params); + tport->retry_counter = 0; + + if (tport->fcp_csp & FCP_SPPF_RETRY) + tport->tgt_flags |= FNIC_FC_RP_FLAGS_RETRY; + + /* Check if the device plays Target Mode Function */ + if (!(tport->fcp_csp & FCP_PRLI_FUNC_TARGET)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Remote port(0x%x): no target support. 
Deleting it\n", + tgt_fcid); + fdls_tgt_logout(iport, tport); + fdls_delete_tport(iport, tport); + return; + } + + fdls_set_tport_state(tport, FDLS_TGT_STATE_READY); + + /* Inform the driver about new target added */ + tport_add_evt = kzalloc(sizeof(struct fnic_tport_event_s), GFP_ATOMIC); + if (!tport_add_evt) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "tport event memory allocation failure: 0x%0x\n", + tport->fcid); + return; + } + tport_add_evt->event = TGT_EV_RPORT_ADD; + tport_add_evt->arg1 = (void *) tport; + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "iport fcid: 0x%x add tport event fcid: 0x%x\n", + tport->fcid, iport->fcid); + list_add_tail(&tport_add_evt->links, &fnic->tport_event_list); + queue_work(fnic_event_queue, &fnic->tport_work); +} + + +static void +fdls_process_rff_id_rsp(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr) +{ + struct fnic *fnic = iport->fnic; + struct fnic_fdls_fabric_s *fdls = &iport->fabric; + struct fc_std_rff_id *rff_rsp = (struct fc_std_rff_id *) fchdr; + uint16_t rsp; + uint8_t reason_code; + uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr); + + if (fdls_get_state(fdls) != FDLS_STATE_REGISTER_FC4_FEATURES) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "RFF_ID resp recvd in state(%d). Dropping.", + fdls_get_state(fdls)); + return; + } + + if (iport->active_oxid_fabric_req != oxid) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Incorrect OXID in response. state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n", + fdls_get_state(fdls), oxid, iport->active_oxid_fabric_req); + return; + } + + rsp = FNIC_STD_GET_FC_CT_CMD((&rff_rsp->fc_std_ct_hdr)); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS process RFF ID response: 0x%04x", iport->fcid, + (uint32_t) rsp); + + fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req); + + switch (rsp) { + case FC_FS_ACC: + if (iport->fabric.timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Canceling fabric disc timer %p\n", iport); + fnic_del_fabric_timer_sync(fnic); + } + iport->fabric.timer_pending = 0; + fdls->retry_counter = 0; + fdls_set_state((&iport->fabric), FDLS_STATE_SCR); + fdls_send_scr(iport); + break; + case FC_FS_RJT: + reason_code = rff_rsp->fc_std_ct_hdr.ct_reason; + if (((reason_code == FC_FS_RJT_BSY) + || (reason_code == FC_FS_RJT_UNABL)) + && (fdls->retry_counter < FDLS_RETRY_COUNT)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "RFF_ID ret ELS_LS_RJT BUSY. Retry from timer routine %p", + iport); + + /* Retry again from the timer routine */ + fdls->flags |= FNIC_FDLS_RETRY_FRAME; + } else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "RFF_ID returned ELS_LS_RJT. Halting discovery %p", + iport); + if (iport->fabric.timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Canceling fabric disc timer %p\n", iport); + fnic_del_fabric_timer_sync(fnic); + } + fdls->timer_pending = 0; + fdls->retry_counter = 0; + } + break; + default: + break; + } +} + +static void +fdls_process_rft_id_rsp(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr) +{ + struct fnic_fdls_fabric_s *fdls = &iport->fabric; + struct fc_std_rft_id *rft_rsp = (struct fc_std_rft_id *) fchdr; + uint16_t rsp; + uint8_t reason_code; + struct fnic *fnic = iport->fnic; + uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr); + + if (fdls_get_state(fdls) != FDLS_STATE_REGISTER_FC4_TYPES) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "RFT_ID resp recvd in state(%d). 
Dropping.", + fdls_get_state(fdls)); + return; + } + + if (iport->active_oxid_fabric_req != oxid) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Incorrect OXID in response. state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n", + fdls_get_state(fdls), oxid, iport->active_oxid_fabric_req); + return; + } + + + rsp = FNIC_STD_GET_FC_CT_CMD((&rft_rsp->fc_std_ct_hdr)); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS process RFT ID response: 0x%04x", iport->fcid, + (uint32_t) rsp); + + fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req); + + switch (rsp) { + case FC_FS_ACC: + if (iport->fabric.timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Canceling fabric disc timer %p\n", iport); + fnic_del_fabric_timer_sync(fnic); + } + iport->fabric.timer_pending = 0; + fdls->retry_counter = 0; + fdls_send_register_fc4_features(iport); + fdls_set_state((&iport->fabric), FDLS_STATE_REGISTER_FC4_FEATURES); + break; + case FC_FS_RJT: + reason_code = rft_rsp->fc_std_ct_hdr.ct_reason; + if (((reason_code == FC_FS_RJT_BSY) + || (reason_code == FC_FS_RJT_UNABL)) + && (fdls->retry_counter < FDLS_RETRY_COUNT)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: RFT_ID ret ELS_LS_RJT BUSY. Retry from timer routine", + iport->fcid); + + /* Retry again from the timer routine */ + fdls->flags |= FNIC_FDLS_RETRY_FRAME; + } else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: RFT_ID REJ. Halting discovery reason %d expl %d", + iport->fcid, reason_code, + rft_rsp->fc_std_ct_hdr.ct_explan); + if (iport->fabric.timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Canceling fabric disc timer %p\n", iport); + fnic_del_fabric_timer_sync(fnic); + } + fdls->timer_pending = 0; + fdls->retry_counter = 0; + } + break; + default: + break; + } +} + +static void +fdls_process_rpn_id_rsp(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr) +{ + struct fnic_fdls_fabric_s *fdls = &iport->fabric; + struct fc_std_rpn_id *rpn_rsp = (struct fc_std_rpn_id *) fchdr; + uint16_t rsp; + uint8_t reason_code; + struct fnic *fnic = iport->fnic; + uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr); + + if (fdls_get_state(fdls) != FDLS_STATE_RPN_ID) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "RPN_ID resp recvd in state(%d). Dropping.", + fdls_get_state(fdls)); + return; + } + if (iport->active_oxid_fabric_req != oxid) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Incorrect OXID in response. 
state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n", + fdls_get_state(fdls), oxid, iport->active_oxid_fabric_req); + return; + } + + rsp = FNIC_STD_GET_FC_CT_CMD((&rpn_rsp->fc_std_ct_hdr)); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS process RPN ID response: 0x%04x", iport->fcid, + (uint32_t) rsp); + fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req); + + switch (rsp) { + case FC_FS_ACC: + if (iport->fabric.timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Canceling fabric disc timer %p\n", iport); + fnic_del_fabric_timer_sync(fnic); + } + iport->fabric.timer_pending = 0; + fdls->retry_counter = 0; + fdls_send_register_fc4_types(iport); + fdls_set_state((&iport->fabric), FDLS_STATE_REGISTER_FC4_TYPES); + break; + case FC_FS_RJT: + reason_code = rpn_rsp->fc_std_ct_hdr.ct_reason; + if (((reason_code == FC_FS_RJT_BSY) + || (reason_code == FC_FS_RJT_UNABL)) + && (fdls->retry_counter < FDLS_RETRY_COUNT)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "RPN_ID returned REJ BUSY. Retry from timer routine %p", + iport); + + /* Retry again from the timer routine */ + fdls->flags |= FNIC_FDLS_RETRY_FRAME; + } else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "RPN_ID ELS_LS_RJT. Halting discovery %p", iport); + if (iport->fabric.timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Canceling fabric disc timer %p\n", iport); + fnic_del_fabric_timer_sync(fnic); + } + fdls->timer_pending = 0; + fdls->retry_counter = 0; + } + break; + default: + break; + } +} + +static void +fdls_process_scr_rsp(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr) +{ + struct fnic_fdls_fabric_s *fdls = &iport->fabric; + struct fc_std_scr *scr_rsp = (struct fc_std_scr *) fchdr; + struct fc_std_els_rjt_rsp *els_rjt = (struct fc_std_els_rjt_rsp *) fchdr; + struct fnic *fnic = iport->fnic; + uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FDLS process SCR response: 0x%04x", + (uint32_t) scr_rsp->scr.scr_cmd); + + if (fdls_get_state(fdls) != FDLS_STATE_SCR) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "SCR resp recvd in state(%d). Dropping.", + fdls_get_state(fdls)); + return; + } + if (iport->active_oxid_fabric_req != oxid) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Incorrect OXID in response. state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n", + fdls_get_state(fdls), oxid, iport->active_oxid_fabric_req); + } + + fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req); + + switch (scr_rsp->scr.scr_cmd) { + case ELS_LS_ACC: + atomic64_inc(&iport->iport_stats.fabric_scr_ls_accepts); + if (iport->fabric.timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Canceling fabric disc timer %p\n", iport); + fnic_del_fabric_timer_sync(fnic); + } + iport->fabric.timer_pending = 0; + iport->fabric.retry_counter = 0; + fdls_send_gpn_ft(iport, FDLS_STATE_GPN_FT); + break; + + case ELS_LS_RJT: + atomic64_inc(&iport->iport_stats.fabric_scr_ls_rejects); + if (((els_rjt->rej.er_reason == ELS_RJT_BUSY) + || (els_rjt->rej.er_reason == ELS_RJT_UNAB)) + && (fdls->retry_counter < FDLS_RETRY_COUNT)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "SCR ELS_LS_RJT BUSY. Retry from timer routine %p", + iport); + /* Retry again from the timer routine */ + fdls->flags |= FNIC_FDLS_RETRY_FRAME; + } else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "SCR returned ELS_LS_RJT. 
Halting discovery %p",
+ iport);
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Canceling fabric disc timer %p\n",
+ iport);
+ fnic_del_fabric_timer_sync(fnic);
+ }
+ fdls->timer_pending = 0;
+ fdls->retry_counter = 0;
+ }
+ break;
+
+ default:
+ atomic64_inc(&iport->iport_stats.fabric_scr_misc_rejects);
+ break;
+ }
+}
+
+static void
+fdls_process_gpn_ft_tgt_list(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr, int len)
+{
+ struct fc_gpn_ft_rsp_iu *gpn_ft_tgt;
+ struct fnic_tport_s *tport, *next;
+ uint32_t fcid;
+ uint64_t wwpn;
+ int rem_len = len;
+ u32 old_link_down_cnt = iport->fnic->link_down_cnt;
+ struct fnic *fnic = iport->fnic;
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: FDLS process GPN_FT tgt list", iport->fcid);
+
+ gpn_ft_tgt =
+ (struct fc_gpn_ft_rsp_iu *)((uint8_t *) fchdr
+ + sizeof(struct fc_frame_header)
+ + sizeof(struct fc_ct_hdr));
+ len -= sizeof(struct fc_frame_header) + sizeof(struct fc_ct_hdr);
+
+ while (rem_len > 0) {
+
+ fcid = ntoh24(gpn_ft_tgt->fcid);
+ wwpn = be64_to_cpu(gpn_ft_tgt->wwpn);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "tport: 0x%x: ctrl:0x%x", fcid, gpn_ft_tgt->ctrl);
+
+ if (fcid == iport->fcid) {
+ if (gpn_ft_tgt->ctrl & FC_NS_FID_LAST)
+ break;
+ gpn_ft_tgt++;
+ rem_len -= sizeof(struct fc_gpn_ft_rsp_iu);
+ continue;
+ }
+
+ tport = fnic_find_tport_by_wwpn(iport, wwpn);
+ if (!tport) {
+ /*
+ * New port registered with the switch or first time query
+ */
+ tport = fdls_create_tport(iport, fcid, wwpn);
+ if (!tport)
+ return;
+ }
+ /*
+ * Check if this was an existing tport with the same fcid
+ * but whose wwpn has changed now; then remove it and
+ * create a new one.
+ */
+ if (tport->fcid != fcid) {
+ fdls_delete_tport(iport, tport);
+ tport = fdls_create_tport(iport, fcid, wwpn);
+ if (!tport)
+ return;
+ }
+
+ /*
+ * If this GPN_FT rsp is after an RSCN, then mark the tports that
+ * match the new GPN_FT list; if some tport is not
+ * found in GPN_FT, we want to delete that tport later.
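+ * (Matched tports are tagged with FNIC_FDLS_TPORT_IN_GPN_FT_LIST; the
+ * sweep after this loop deletes any tport left untagged.)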
+ */
+ if (fdls_get_state((&iport->fabric)) == FDLS_STATE_RSCN_GPN_FT)
+ tport->flags |= FNIC_FDLS_TPORT_IN_GPN_FT_LIST;
+
+ if (gpn_ft_tgt->ctrl & FC_NS_FID_LAST)
+ break;
+
+ gpn_ft_tgt++;
+ rem_len -= sizeof(struct fc_gpn_ft_rsp_iu);
+ }
+ if (rem_len <= 0) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "GPN_FT response: malformed/corrupt frame rxlen: %d remlen: %d",
+ len, rem_len);
+ }
+
+ /* Remove those ports which were not listed in GPN_FT */
+ if (fdls_get_state((&iport->fabric)) == FDLS_STATE_RSCN_GPN_FT) {
+ list_for_each_entry_safe(tport, next, &iport->tport_list, links) {
+
+ if (!(tport->flags & FNIC_FDLS_TPORT_IN_GPN_FT_LIST)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Remove port: 0x%x not found in GPN_FT list",
+ tport->fcid);
+ fdls_delete_tport(iport, tport);
+ } else {
+ tport->flags &= ~FNIC_FDLS_TPORT_IN_GPN_FT_LIST;
+ }
+ if ((old_link_down_cnt != iport->fnic->link_down_cnt)
+ || (iport->state != FNIC_IPORT_STATE_READY)) {
+ return;
+ }
+ }
+ }
+}
+
+static void
+fdls_process_gpn_ft_rsp(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr, int len)
+{
+ struct fnic_fdls_fabric_s *fdls = &iport->fabric;
+ struct fc_std_gpn_ft *gpn_ft_rsp = (struct fc_std_gpn_ft *) fchdr;
+ uint16_t rsp;
+ uint8_t reason_code;
+ int count = 0;
+ struct fnic_tport_s *tport, *next;
+ u32 old_link_down_cnt = iport->fnic->link_down_cnt;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr);
+
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "FDLS process GPN_FT response: iport state: %d len: %d",
+ iport->state, len);
+
+ /*
+ * GPN_FT response:
+ * FDLS_STATE_GPN_FT : GPN_FT sent after the SCR state
+ * during fabric discovery (FNIC_IPORT_STATE_FABRIC_DISC)
+ * FDLS_STATE_RSCN_GPN_FT : GPN_FT sent in response to an RSCN
+ * FDLS_STATE_SEND_GPNFT : GPN_FT sent after deleting a target,
+ * e.g. after receiving a target LOGO
+ * FDLS_STATE_TGT_DISCOVERY : target discovery is currently in progress
+ * from a previous GPN_FT response, and a new GPN_FT response has come.
+ */
+ if (!(((iport->state == FNIC_IPORT_STATE_FABRIC_DISC)
+ && (fdls_get_state(fdls) == FDLS_STATE_GPN_FT))
+ || ((iport->state == FNIC_IPORT_STATE_READY)
+ && ((fdls_get_state(fdls) == FDLS_STATE_RSCN_GPN_FT)
+ || (fdls_get_state(fdls) == FDLS_STATE_SEND_GPNFT)
+ || (fdls_get_state(fdls) == FDLS_STATE_TGT_DISCOVERY))))) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "GPNFT resp recvd in fab state(%d) iport_state(%d). Dropping.",
+ fdls_get_state(fdls), iport->state);
+ return;
+ }
+
+ if (iport->active_oxid_fabric_req != oxid) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Incorrect OXID in response. state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n",
+ fdls_get_state(fdls), oxid, iport->active_oxid_fabric_req);
+ }
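+ /* A GPN_FT response implies fabric login and name server registration
+ * completed; mark the iport READY before walking the target list. */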
+
+ fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req);
+
+ iport->state = FNIC_IPORT_STATE_READY;
+ rsp = FNIC_STD_GET_FC_CT_CMD((&gpn_ft_rsp->fc_std_ct_hdr));
+
+ switch (rsp) {
+
+ case FC_FS_ACC:
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: GPNFT_RSP accept", iport->fcid);
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Canceling fabric disc timer\n",
+ iport->fcid);
+ fnic_del_fabric_timer_sync(fnic);
+ }
+ iport->fabric.timer_pending = 0;
+ iport->fabric.retry_counter = 0;
+ fdls_process_gpn_ft_tgt_list(iport, fchdr, len);
+
+ /*
+ * The iport state can change only if a link down event happened.
+ * We don't need to undo fdls_process_gpn_ft_tgt_list;
+ * that will be taken care of in the next link up event.
+ */
+ if (iport->state != FNIC_IPORT_STATE_READY) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Halting target discovery: fab st: %d iport st: %d ",
+ fdls_get_state(fdls), iport->state);
+ break;
+ }
+ fdls_tgt_discovery_start(iport);
+ break;
+
+ case FC_FS_RJT:
+ reason_code = gpn_ft_rsp->fc_std_ct_hdr.ct_reason;
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: GPNFT_RSP Reject reason: %d", iport->fcid, reason_code);
+
+ if (((reason_code == FC_FS_RJT_BSY)
+ || (reason_code == FC_FS_RJT_UNABL))
+ && (fdls->retry_counter < FDLS_RETRY_COUNT)) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: GPNFT_RSP ret REJ/BSY. Retry from timer routine",
+ iport->fcid);
+ /* Retry again from the timer routine */
+ fdls->flags |= FNIC_FDLS_RETRY_FRAME;
+ } else {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: GPNFT_RSP reject", iport->fcid);
+ if (iport->fabric.timer_pending) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "0x%x: Canceling fabric disc timer\n",
+ iport->fcid);
+ fnic_del_fabric_timer_sync(fnic);
+ }
+ iport->fabric.timer_pending = 0;
+ iport->fabric.retry_counter = 0;
+ /*
+ * If GPN_FT was LS_RJT, then we should delete
+ * all existing tports.
+ */
+ count = 0;
+ list_for_each_entry_safe(tport, next, &iport->tport_list,
+ links) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "GPN_FT_REJECT: Remove port: 0x%x",
+ tport->fcid);
+ fdls_delete_tport(iport, tport);
+ if ((old_link_down_cnt != iport->fnic->link_down_cnt)
+ || (iport->state != FNIC_IPORT_STATE_READY)) {
+ return;
+ }
+ count++;
+ }
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "GPN_FT_REJECT: Removed (0x%x) ports", count);
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
+/**
+ * fdls_process_fabric_logo_rsp - Handle an flogo response from the fcf
+ * @iport: Handle to fnic iport
+ * @fchdr: Incoming frame
+ */
+static void
+fdls_process_fabric_logo_rsp(struct fnic_iport_s *iport,
+ struct fc_frame_header *fchdr)
+{
+ struct fc_std_flogi *flogo_rsp = (struct fc_std_flogi *) fchdr;
+ struct fnic_fdls_fabric_s *fdls = &iport->fabric;
+ struct fnic *fnic = iport->fnic;
+ uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr);
+
+ if (iport->active_oxid_fabric_req != oxid) {
+ FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+ "Incorrect OXID in response. 
state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n", + fdls_get_state(fdls), oxid, iport->active_oxid_fabric_req); + } + fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req); + + switch (flogo_rsp->els.fl_cmd) { + case ELS_LS_ACC: + if (iport->fabric.state != FDLS_STATE_FABRIC_LOGO) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Flogo response. Fabric not in LOGO state. Dropping! %p", + iport); + return; + } + + iport->fabric.state = FDLS_STATE_FLOGO_DONE; + iport->state = FNIC_IPORT_STATE_LINK_WAIT; + + if (iport->fabric.timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "iport 0x%p Canceling fabric disc timer\n", + iport); + fnic_del_fabric_timer_sync(fnic); + } + iport->fabric.timer_pending = 0; + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Flogo response from Fabric for did: 0x%x", + ntoh24(fchdr->fh_d_id)); + return; + + case ELS_LS_RJT: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Flogo response from Fabric for did: 0x%x returned ELS_LS_RJT", + ntoh24(fchdr->fh_d_id)); + return; + + default: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FLOGO response not accepted or rejected: 0x%x", + flogo_rsp->els.fl_cmd); + } +} + +static void +fdls_process_flogi_rsp(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr, void *rx_frame) +{ + struct fnic_fdls_fabric_s *fabric = &iport->fabric; + struct fc_std_flogi *flogi_rsp = (struct fc_std_flogi *) fchdr; + uint8_t *fcid; + uint16_t rdf_size; + uint8_t fcmac[6] = { 0x0E, 0XFC, 0x00, 0x00, 0x00, 0x00 }; + struct fnic *fnic = iport->fnic; + uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS processing FLOGI response", iport->fcid); + + if (fdls_get_state(fabric) != FDLS_STATE_FABRIC_FLOGI) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FLOGI response received in state (%d). Dropping frame", + fdls_get_state(fabric)); + return; + } + if (iport->active_oxid_fabric_req != oxid) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Incorrect OXID in response. 
state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n", + fdls_get_state(fabric), oxid, iport->active_oxid_fabric_req); + return; + } + + fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req); + + switch (flogi_rsp->els.fl_cmd) { + case ELS_LS_ACC: + atomic64_inc(&iport->iport_stats.fabric_flogi_ls_accepts); + if (iport->fabric.timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "iport fcid: 0x%x Canceling fabric disc timer\n", + iport->fcid); + fnic_del_fabric_timer_sync(fnic); + } + + iport->fabric.timer_pending = 0; + iport->fabric.retry_counter = 0; + fcid = FNIC_STD_GET_D_ID(fchdr); + iport->fcid = ntoh24(fcid); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FLOGI response accepted", iport->fcid); + + /* Learn the Service Params */ + rdf_size = be16_to_cpu(FNIC_LOGI_RDF_SIZE(flogi_rsp->els)); + if ((rdf_size >= FNIC_MIN_DATA_FIELD_SIZE) + && (rdf_size < FNIC_FC_MAX_PAYLOAD_LEN)) + iport->max_payload_size = min(rdf_size, + iport->max_payload_size); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "max_payload_size from fabric: %u set: %d", rdf_size, + iport->max_payload_size); + + iport->r_a_tov = be32_to_cpu(FNIC_LOGI_R_A_TOV(flogi_rsp->els)); + iport->e_d_tov = be32_to_cpu(FNIC_LOGI_E_D_TOV(flogi_rsp->els)); + + if (FNIC_LOGI_FEATURES(flogi_rsp->els) & FNIC_FC_EDTOV_NSEC) + iport->e_d_tov = iport->e_d_tov / FNIC_NSEC_TO_MSEC; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "From fabric: R_A_TOV: %d E_D_TOV: %d", + iport->r_a_tov, iport->e_d_tov); + + fc_host_fabric_name(iport->fnic->host) = + get_unaligned_be64(&FNIC_LOGI_NODE_NAME(flogi_rsp->els)); + fc_host_port_id(iport->fnic->host) = iport->fcid; + + fnic_fdls_learn_fcoe_macs(iport, rx_frame, fcid); + + if (fnic_fdls_register_portid(iport, iport->fcid, rx_frame) != 0) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FLOGI registration failed", iport->fcid); + break; + } + + memcpy(&fcmac[3], fcid, 3); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Adding vNIC device MAC addr: %02x:%02x:%02x:%02x:%02x:%02x", + fcmac[0], fcmac[1], fcmac[2], fcmac[3], fcmac[4], + fcmac[5]); + vnic_dev_add_addr(iport->fnic->vdev, fcmac); + + if (fdls_get_state(fabric) == FDLS_STATE_FABRIC_FLOGI) { + fnic_fdls_start_plogi(iport); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FLOGI response received. Starting PLOGI"); + } else { + /* From FDLS_STATE_FABRIC_FLOGI state fabric can only go to + * FDLS_STATE_LINKDOWN + * state, hence we don't have to worry about undoing: + * the fnic_fdls_register_portid and vnic_dev_add_addr + */ + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FLOGI response received in state (%d). Dropping frame", + fdls_get_state(fabric)); + } + break; + + case ELS_LS_RJT: + atomic64_inc(&iport->iport_stats.fabric_flogi_ls_rejects); + if (fabric->retry_counter < iport->max_flogi_retries) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FLOGI returned ELS_LS_RJT BUSY. Retry from timer routine %p", + iport); + + /* Retry Flogi again from the timer routine. */ + fabric->flags |= FNIC_FDLS_RETRY_FRAME; + + } else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FLOGI returned ELS_LS_RJT. 
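/*
 * For illustration: the ELS_LS_ACC path above derives the fabric-provided
 * MAC address (FPMA) by appending the 24-bit FC_ID assigned in the FLOGI
 * accept to the FC-MAP prefix 0E:FC:00 before programming it on the vNIC.
 * A standalone sketch of that derivation:
 */
#include <stdint.h>
#include <string.h>

static void fpma_from_fcid(uint8_t mac[6], const uint8_t fcid[3])
{
	static const uint8_t fc_map[3] = { 0x0E, 0xFC, 0x00 }; /* default FC-MAP */

	memcpy(mac, fc_map, 3);
	memcpy(mac + 3, fcid, 3); /* fabric-assigned N_Port ID */
}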
Halting discovery %p", + iport); + if (iport->fabric.timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "iport 0x%p Canceling fabric disc timer\n", + iport); + fnic_del_fabric_timer_sync(fnic); + } + fabric->timer_pending = 0; + fabric->retry_counter = 0; + } + break; + + default: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FLOGI response not accepted: 0x%x", + flogi_rsp->els.fl_cmd); + atomic64_inc(&iport->iport_stats.fabric_flogi_misc_rejects); + break; + } +} + +static void +fdls_process_fabric_plogi_rsp(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr) +{ + struct fc_std_flogi *plogi_rsp = (struct fc_std_flogi *) fchdr; + struct fc_std_els_rjt_rsp *els_rjt = (struct fc_std_els_rjt_rsp *) fchdr; + struct fnic_fdls_fabric_s *fdls = &iport->fabric; + struct fnic *fnic = iport->fnic; + uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr); + + if (fdls_get_state((&iport->fabric)) != FDLS_STATE_FABRIC_PLOGI) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Fabric PLOGI response received in state (%d). Dropping frame", + fdls_get_state(&iport->fabric)); + return; + } + if (iport->active_oxid_fabric_req != oxid) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Incorrect OXID in response. state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n", + fdls_get_state(fdls), oxid, iport->active_oxid_fabric_req); + return; + } + fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req); + + switch (plogi_rsp->els.fl_cmd) { + case ELS_LS_ACC: + atomic64_inc(&iport->iport_stats.fabric_plogi_ls_accepts); + if (iport->fabric.timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "iport fcid: 0x%x fabric PLOGI response: Accepted\n", + iport->fcid); + fnic_del_fabric_timer_sync(fnic); + } + iport->fabric.timer_pending = 0; + iport->fabric.retry_counter = 0; + fdls_set_state(&iport->fabric, FDLS_STATE_RPN_ID); + fdls_send_rpn_id(iport); + break; + case ELS_LS_RJT: + atomic64_inc(&iport->iport_stats.fabric_plogi_ls_rejects); + if (((els_rjt->rej.er_reason == ELS_RJT_BUSY) + || (els_rjt->rej.er_reason == ELS_RJT_UNAB)) + && (iport->fabric.retry_counter < iport->max_plogi_retries)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Fabric PLOGI ELS_LS_RJT BUSY. Retry from timer routine", + iport->fcid); + } else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Fabric PLOGI ELS_LS_RJT. Halting discovery", + iport->fcid); + if (iport->fabric.timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "iport fcid: 0x%x Canceling fabric disc timer\n", + iport->fcid); + fnic_del_fabric_timer_sync(fnic); + } + iport->fabric.timer_pending = 0; + iport->fabric.retry_counter = 0; + return; + } + break; + default: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "PLOGI response not accepted: 0x%x", + plogi_rsp->els.fl_cmd); + atomic64_inc(&iport->iport_stats.fabric_plogi_misc_rejects); + break; + } +} + +static void fdls_process_fdmi_plogi_rsp(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr) +{ + struct fc_std_flogi *plogi_rsp = (struct fc_std_flogi *)fchdr; + struct fc_std_els_rjt_rsp *els_rjt = (struct fc_std_els_rjt_rsp *)fchdr; + struct fnic_fdls_fabric_s *fdls = &iport->fabric; + struct fnic *fnic = iport->fnic; + u64 fdmi_tov; + uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr); + + if (iport->active_oxid_fdmi_plogi != oxid) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Incorrect OXID in response. 
state: %d, oxid recvd: 0x%x, active oxid: 0x%x\n", + fdls_get_state(fdls), oxid, iport->active_oxid_fdmi_plogi); + return; + } + + iport->fabric.fdmi_pending &= ~FDLS_FDMI_PLOGI_PENDING; + fdls_free_oxid(iport, oxid, &iport->active_oxid_fdmi_plogi); + + if (ntoh24(fchdr->fh_s_id) == FC_FID_MGMT_SERV) { + del_timer_sync(&iport->fabric.fdmi_timer); + iport->fabric.fdmi_pending = 0; + switch (plogi_rsp->els.fl_cmd) { + case ELS_LS_ACC: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FDLS process fdmi PLOGI response status: ELS_LS_ACC\n"); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Sending fdmi registration for port 0x%x\n", + iport->fcid); + + fdls_fdmi_register_hba(iport); + fdls_fdmi_register_pa(iport); + fdmi_tov = jiffies + msecs_to_jiffies(5000); + mod_timer(&iport->fabric.fdmi_timer, + round_jiffies(fdmi_tov)); + break; + case ELS_LS_RJT: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Fabric FDMI PLOGI returned ELS_LS_RJT reason: 0x%x", + els_rjt->rej.er_reason); + + if (((els_rjt->rej.er_reason == ELS_RJT_BUSY) + || (els_rjt->rej.er_reason == ELS_RJT_UNAB)) + && (iport->fabric.fdmi_retry < 7)) { + iport->fabric.fdmi_retry++; + fdls_send_fdmi_plogi(iport); + } + break; + default: + break; + } + } +} +static void fdls_process_fdmi_reg_ack(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr, + int rsp_type) +{ + struct fnic *fnic = iport->fnic; + uint16_t oxid; + + if (!iport->fabric.fdmi_pending) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Received FDMI ack while not waiting: 0x%x\n", + FNIC_STD_GET_OX_ID(fchdr)); + return; + } + + oxid = FNIC_STD_GET_OX_ID(fchdr); + + if ((iport->active_oxid_fdmi_rhba != oxid) && + (iport->active_oxid_fdmi_rpa != oxid)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Incorrect OXID in response. oxid recvd: 0x%x, active oxids(rhba,rpa): 0x%x, 0x%x\n", + oxid, iport->active_oxid_fdmi_rhba, iport->active_oxid_fdmi_rpa); + return; + } + if (FNIC_FRAME_TYPE(oxid) == FNIC_FRAME_TYPE_FDMI_RHBA) { + iport->fabric.fdmi_pending &= ~FDLS_FDMI_REG_HBA_PENDING; + fdls_free_oxid(iport, oxid, &iport->active_oxid_fdmi_rhba); + } else { + iport->fabric.fdmi_pending &= ~FDLS_FDMI_RPA_PENDING; + fdls_free_oxid(iport, oxid, &iport->active_oxid_fdmi_rpa); + } + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "iport fcid: 0x%x: Received FDMI registration ack\n", + iport->fcid); + + if (!iport->fabric.fdmi_pending) { + del_timer_sync(&iport->fabric.fdmi_timer); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "iport fcid: 0x%x: Canceling FDMI timer\n", + iport->fcid); + } +} + +static void fdls_process_fdmi_abts_rsp(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr) +{ + uint32_t s_id; + struct fnic *fnic = iport->fnic; + uint16_t oxid; + + s_id = ntoh24(FNIC_STD_GET_S_ID(fchdr)); + + if (!(s_id != FC_FID_MGMT_SERV)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received abts rsp with invalid SID: 0x%x. Dropping frame", + s_id); + return; + } + + oxid = FNIC_STD_GET_OX_ID(fchdr); + + switch (FNIC_FRAME_TYPE(oxid)) { + case FNIC_FRAME_TYPE_FDMI_PLOGI: + fdls_free_oxid(iport, oxid, &iport->active_oxid_fdmi_plogi); + break; + case FNIC_FRAME_TYPE_FDMI_RHBA: + fdls_free_oxid(iport, oxid, &iport->active_oxid_fdmi_rhba); + break; + case FNIC_FRAME_TYPE_FDMI_RPA: + fdls_free_oxid(iport, oxid, &iport->active_oxid_fdmi_rpa); + break; + default: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received abts rsp with invalid oxid: 0x%x. 
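/*
 * For illustration: after the FDMI PLOGI accept above, the RHBA and RPA
 * registrations go out back to back under one 5-second timer; each ack
 * clears its pending bit and the timer is cancelled once both are done.
 * A sketch with assumed bit encodings:
 */
#include <stdint.h>

#define REG_HBA_PENDING 0x1 /* assumed values, for illustration only */
#define RPA_PENDING     0x2

static uint32_t fdmi_start_registrations(void)
{
	return REG_HBA_PENDING | RPA_PENDING;
}

static int fdmi_reg_ack(uint32_t *pending, uint32_t done_bit)
{
	*pending &= ~done_bit;
	return *pending == 0; /* 1 == both acks in: cancel the FDMI timer */
}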
Dropping frame", + oxid); + break; + } + + del_timer_sync(&iport->fabric.fdmi_timer); + iport->fabric.fdmi_pending &= ~FDLS_FDMI_ABORT_PENDING; + + fdls_send_fdmi_plogi(iport); +} + +static void +fdls_process_fabric_abts_rsp(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr) +{ + uint32_t s_id; + struct fc_std_abts_ba_acc *ba_acc = (struct fc_std_abts_ba_acc *)fchdr; + struct fc_std_abts_ba_rjt *ba_rjt; + uint32_t fabric_state = iport->fabric.state; + struct fnic *fnic = iport->fnic; + int frame_type; + uint16_t oxid; + + s_id = ntoh24(fchdr->fh_s_id); + ba_rjt = (struct fc_std_abts_ba_rjt *) fchdr; + + if (!((s_id == FC_FID_DIR_SERV) || (s_id == FC_FID_FLOGI) + || (s_id == FC_FID_FCTRL))) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received abts rsp with invalid SID: 0x%x. Dropping frame", + s_id); + return; + } + + oxid = FNIC_STD_GET_OX_ID(fchdr); + if (iport->active_oxid_fabric_req != oxid) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received abts rsp with invalid oxid: 0x%x. Dropping frame", + oxid); + return; + } + + if (iport->fabric.timer_pending) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Canceling fabric disc timer %p\n", iport); + fnic_del_fabric_timer_sync(fnic); + } + iport->fabric.timer_pending = 0; + iport->fabric.flags &= ~FNIC_FDLS_FABRIC_ABORT_ISSUED; + + if (fchdr->fh_r_ctl == FC_RCTL_BA_ACC) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received abts rsp BA_ACC for fabric_state: %d OX_ID: 0x%x", + fabric_state, be16_to_cpu(ba_acc->acc.ba_ox_id)); + } else if (fchdr->fh_r_ctl == FC_RCTL_BA_RJT) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "BA_RJT fs: %d OX_ID: 0x%x rc: 0x%x rce: 0x%x", + fabric_state, FNIC_STD_GET_OX_ID(&ba_rjt->fchdr), + ba_rjt->rjt.br_reason, ba_rjt->rjt.br_explan); + } + + frame_type = FNIC_FRAME_TYPE(oxid); + fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req); + + /* currently error handling/retry logic is same for ABTS BA_ACC & BA_RJT */ + switch (frame_type) { + case FNIC_FRAME_TYPE_FABRIC_FLOGI: + if (iport->fabric.retry_counter < iport->max_flogi_retries) + fdls_send_fabric_flogi(iport); + else + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Exceeded max FLOGI retries"); + break; + case FNIC_FRAME_TYPE_FABRIC_LOGO: + if (iport->fabric.retry_counter < FABRIC_LOGO_MAX_RETRY) + fdls_send_fabric_logo(iport); + break; + case FNIC_FRAME_TYPE_FABRIC_PLOGI: + if (iport->fabric.retry_counter < iport->max_plogi_retries) + fdls_send_fabric_plogi(iport); + else + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Exceeded max PLOGI retries"); + break; + case FNIC_FRAME_TYPE_FABRIC_RPN: + if (iport->fabric.retry_counter < FDLS_RETRY_COUNT) + fdls_send_rpn_id(iport); + else + /* go back to fabric Plogi */ + fnic_fdls_start_plogi(iport); + break; + case FNIC_FRAME_TYPE_FABRIC_SCR: + if (iport->fabric.retry_counter < FDLS_RETRY_COUNT) + fdls_send_scr(iport); + else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "SCR exhausted retries. Start fabric PLOGI %p", + iport); + fnic_fdls_start_plogi(iport); /* go back to fabric Plogi */ + } + break; + case FNIC_FRAME_TYPE_FABRIC_RFT: + if (iport->fabric.retry_counter < FDLS_RETRY_COUNT) + fdls_send_register_fc4_types(iport); + else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "RFT exhausted retries. 
Start fabric PLOGI %p", + iport); + fnic_fdls_start_plogi(iport); /* go back to fabric Plogi */ + } + break; + case FNIC_FRAME_TYPE_FABRIC_RFF: + if (iport->fabric.retry_counter < FDLS_RETRY_COUNT) + fdls_send_register_fc4_features(iport); + else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "RFF exhausted retries. Start fabric PLOGI %p", + iport); + fnic_fdls_start_plogi(iport); /* go back to fabric Plogi */ + } + break; + case FNIC_FRAME_TYPE_FABRIC_GPN_FT: + if (iport->fabric.retry_counter <= FDLS_RETRY_COUNT) + fdls_send_gpn_ft(iport, fabric_state); + else + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "GPN FT exhausted retries. Start fabric PLOGI %p", + iport); + break; + default: + /* + * We should not be here since we already validated rx oxid with + * our active_oxid_fabric_req + */ + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Invalid OXID/active oxid 0x%x\n", oxid); + WARN_ON(true); + return; + } +} + +static void +fdls_process_abts_req(struct fnic_iport_s *iport, struct fc_frame_header *fchdr) +{ + uint8_t *frame; + struct fc_std_abts_ba_acc *pba_acc; + uint32_t nport_id; + uint16_t oxid = FNIC_STD_GET_OX_ID(fchdr); + struct fnic_tport_s *tport; + struct fnic *fnic = iport->fnic; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_abts_ba_acc); + + nport_id = ntoh24(fchdr->fh_s_id); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received abort from SID 0x%8x", nport_id); + + tport = fnic_find_tport_by_fcid(iport, nport_id); + if (tport) { + if (tport->active_oxid == oxid) { + tport->flags |= FNIC_FDLS_TGT_ABORT_ISSUED; + fdls_free_oxid(iport, oxid, &tport->active_oxid); + } + } + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "0x%x: Failed to allocate frame to send response for ABTS req", + iport->fcid); + return; + } + + pba_acc = (struct fc_std_abts_ba_acc *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + *pba_acc = (struct fc_std_abts_ba_acc) { + .fchdr = {.fh_r_ctl = FC_RCTL_BA_ACC, + .fh_f_ctl = {FNIC_FCP_RSP_FCTL, 0, 0}}, + .acc = {.ba_low_seq_cnt = 0, .ba_high_seq_cnt = cpu_to_be16(0xFFFF)} + }; + + FNIC_STD_SET_S_ID(pba_acc->fchdr, fchdr->fh_d_id); + FNIC_STD_SET_D_ID(pba_acc->fchdr, fchdr->fh_s_id); + FNIC_STD_SET_OX_ID(pba_acc->fchdr, FNIC_STD_GET_OX_ID(fchdr)); + FNIC_STD_SET_RX_ID(pba_acc->fchdr, FNIC_STD_GET_RX_ID(fchdr)); + + pba_acc->acc.ba_rx_id = cpu_to_be16(FNIC_STD_GET_RX_ID(fchdr)); + pba_acc->acc.ba_ox_id = cpu_to_be16(FNIC_STD_GET_OX_ID(fchdr)); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS send BA ACC with oxid: 0x%x", + iport->fcid, oxid); + + fnic_send_fcoe_frame(iport, frame, frame_size); +} + +static void +fdls_process_unsupported_els_req(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr) +{ + uint8_t *frame; + struct fc_std_els_rjt_rsp *pls_rsp; + uint16_t oxid; + uint32_t d_id = ntoh24(fchdr->fh_d_id); + struct fnic *fnic = iport->fnic; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_els_rjt_rsp); + + if (iport->fcid != d_id) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Dropping unsupported ELS with illegal frame bits 0x%x\n", + d_id); + atomic64_inc(&iport->iport_stats.unsupported_frames_dropped); + return; + } + + if ((iport->state != FNIC_IPORT_STATE_READY) + && (iport->state != FNIC_IPORT_STATE_FABRIC_DISC)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Dropping unsupported ELS request in iport state: %d", + iport->state); + 
atomic64_inc(&iport->iport_stats.unsupported_frames_dropped); + return; + } + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send response to unsupported ELS request"); + return; + } + + pls_rsp = (struct fc_std_els_rjt_rsp *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + fdls_init_els_rjt_frame(frame, iport); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Process unsupported ELS request from SID: 0x%x", + iport->fcid, ntoh24(fchdr->fh_s_id)); + + /* We don't support this ELS request, send a reject */ + pls_rsp->rej.er_reason = 0x0B; + pls_rsp->rej.er_explan = 0x0; + pls_rsp->rej.er_vendor = 0x0; + + FNIC_STD_SET_S_ID(pls_rsp->fchdr, fchdr->fh_d_id); + FNIC_STD_SET_D_ID(pls_rsp->fchdr, fchdr->fh_s_id); + oxid = FNIC_STD_GET_OX_ID(fchdr); + FNIC_STD_SET_OX_ID(pls_rsp->fchdr, oxid); + + fnic_send_fcoe_frame(iport, frame, frame_size); +} + +static void +fdls_process_rls_req(struct fnic_iport_s *iport, struct fc_frame_header *fchdr) +{ + uint8_t *frame; + struct fc_std_rls_acc *prls_acc_rsp; + uint16_t oxid; + struct fnic *fnic = iport->fnic; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_rls_acc); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Process RLS request %d", iport->fnic->fnic_num); + + if ((iport->state != FNIC_IPORT_STATE_READY) + && (iport->state != FNIC_IPORT_STATE_FABRIC_DISC)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received RLS req in iport state: %d. Dropping the frame.", + iport->state); + return; + } + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send RLS accept"); + return; + } + prls_acc_rsp = (struct fc_std_rls_acc *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + + FNIC_STD_SET_S_ID(prls_acc_rsp->fchdr, fchdr->fh_d_id); + FNIC_STD_SET_D_ID(prls_acc_rsp->fchdr, fchdr->fh_s_id); + + oxid = FNIC_STD_GET_OX_ID(fchdr); + FNIC_STD_SET_OX_ID(prls_acc_rsp->fchdr, oxid); + FNIC_STD_SET_RX_ID(prls_acc_rsp->fchdr, FNIC_UNASSIGNED_RXID); + + FNIC_STD_SET_F_CTL(prls_acc_rsp->fchdr, FNIC_ELS_REP_FCTL << 16); + FNIC_STD_SET_R_CTL(prls_acc_rsp->fchdr, FC_RCTL_ELS_REP); + FNIC_STD_SET_TYPE(prls_acc_rsp->fchdr, FC_TYPE_ELS); + + prls_acc_rsp->els.rls_cmd = ELS_LS_ACC; + prls_acc_rsp->els.rls_lesb.lesb_link_fail = + cpu_to_be32(iport->fnic->link_down_cnt); + + fnic_send_fcoe_frame(iport, frame, frame_size); +} + +static void +fdls_process_els_req(struct fnic_iport_s *iport, struct fc_frame_header *fchdr, + uint32_t len) +{ + uint8_t *frame; + struct fc_std_els_acc_rsp *pels_acc; + uint16_t oxid; + uint8_t *fc_payload; + uint8_t type; + struct fnic *fnic = iport->fnic; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET; + + fc_payload = (uint8_t *) fchdr + sizeof(struct fc_frame_header); + type = *fc_payload; + + if ((iport->state != FNIC_IPORT_STATE_READY) + && (iport->state != FNIC_IPORT_STATE_FABRIC_DISC)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Dropping ELS frame type: 0x%x in iport state: %d", + type, iport->state); + return; + } + switch (type) { + case ELS_ECHO: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "sending LS_ACC for ECHO request %d\n", + iport->fnic->fnic_num); + break; + + case ELS_RRQ: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "sending LS_ACC for RRQ request %d\n", + iport->fnic->fnic_num); + break; + + default: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "sending 
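/*
 * For illustration: reason 0x0b in the rejects above is the standard ELS
 * "command not supported" code (ELS_RJT_UNSUP in fc_els.h) with a zero
 * explanation and vendor field. The triple the driver fills:
 */
struct els_rjt_fields { unsigned char reason, explan, vendor; };

static struct els_rjt_fields els_rjt_unsupported(void)
{
	return (struct els_rjt_fields) { .reason = 0x0b, .explan = 0, .vendor = 0 };
}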
LS_ACC for 0x%x ELS frame\n", type); + break; + } + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send ELS response for 0x%x", + type); + return; + } + + if (type == ELS_ECHO) { + /* Brocade sends a longer payload, copy all frame back */ + memcpy(frame, fchdr, len); + } + + pels_acc = (struct fc_std_els_acc_rsp *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + fdls_init_els_acc_frame(frame, iport); + + FNIC_STD_SET_D_ID(pels_acc->fchdr, fchdr->fh_s_id); + + oxid = FNIC_STD_GET_OX_ID(fchdr); + FNIC_STD_SET_OX_ID(pels_acc->fchdr, oxid); + + if (type == ELS_ECHO) + frame_size += len; + else + frame_size += sizeof(struct fc_std_els_acc_rsp); + + fnic_send_fcoe_frame(iport, frame, frame_size); +} + +static void +fdls_process_tgt_abts_rsp(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr) +{ + uint32_t s_id; + struct fnic_tport_s *tport; + uint32_t tport_state; + struct fc_std_abts_ba_acc *ba_acc; + struct fc_std_abts_ba_rjt *ba_rjt; + uint16_t oxid; + struct fnic *fnic = iport->fnic; + int frame_type; + + s_id = ntoh24(fchdr->fh_s_id); + ba_acc = (struct fc_std_abts_ba_acc *)fchdr; + ba_rjt = (struct fc_std_abts_ba_rjt *)fchdr; + + tport = fnic_find_tport_by_fcid(iport, s_id); + if (!tport) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Received tgt abts rsp with invalid SID: 0x%x", s_id); + return; + } + if (tport->timer_pending) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "tport 0x%p Canceling fabric disc timer\n", tport); + fnic_del_tport_timer_sync(fnic, tport); + } + if (iport->state != FNIC_IPORT_STATE_READY) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Received tgt abts rsp in iport state(%d). Dropping.", + iport->state); + return; + } + tport->timer_pending = 0; + tport->flags &= ~FNIC_FDLS_TGT_ABORT_ISSUED; + tport_state = tport->state; + oxid = FNIC_STD_GET_OX_ID(fchdr); + + /*This abort rsp is for ADISC */ + frame_type = FNIC_FRAME_TYPE(oxid); + switch (frame_type) { + case FNIC_FRAME_TYPE_TGT_ADISC: + if (fchdr->fh_r_ctl == FC_RCTL_BA_ACC) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "OX_ID: 0x%x tgt_fcid: 0x%x rcvd tgt adisc abts resp BA_ACC", + be16_to_cpu(ba_acc->acc.ba_ox_id), + tport->fcid); + } else if (fchdr->fh_r_ctl == FC_RCTL_BA_RJT) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "ADISC BA_RJT rcvd tport_fcid: 0x%x tport_state: %d ", + tport->fcid, tport_state); + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "reason code: 0x%x reason code explanation:0x%x ", + ba_rjt->rjt.br_reason, + ba_rjt->rjt.br_explan); + } + if ((tport->retry_counter < FDLS_RETRY_COUNT) + && (fchdr->fh_r_ctl == FC_RCTL_BA_ACC)) { + fdls_free_oxid(iport, oxid, &tport->active_oxid); + fdls_send_tgt_adisc(iport, tport); + return; + } + fdls_free_oxid(iport, oxid, &tport->active_oxid); + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "ADISC not responding. 
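/*
 * For illustration: the ECHO accept above reflects the entire received
 * payload (some switches send more than the minimum-sized ECHO), so the
 * response length is the request length rather than a fixed struct size.
 * A sketch of that copy-then-patch approach:
 */
#include <stddef.h>
#include <string.h>

static size_t echo_acc_len(void *rsp, const void *req, size_t req_len)
{
	memcpy(rsp, req, req_len); /* echo everything, then rewrite the header */
	return req_len;
}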
Deleting target port: 0x%x", + tport->fcid); + fdls_delete_tport(iport, tport); + /* Restart discovery of targets */ + if ((iport->state == FNIC_IPORT_STATE_READY) + && (iport->fabric.state != FDLS_STATE_SEND_GPNFT) + && (iport->fabric.state != FDLS_STATE_RSCN_GPN_FT)) { + fdls_send_gpn_ft(iport, FDLS_STATE_SEND_GPNFT); + } + break; + case FNIC_FRAME_TYPE_TGT_PLOGI: + if (fchdr->fh_r_ctl == FC_RCTL_BA_ACC) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Received tgt PLOGI abts response BA_ACC tgt_fcid: 0x%x", + tport->fcid); + } else if (fchdr->fh_r_ctl == FC_RCTL_BA_RJT) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "PLOGI BA_RJT received for tport_fcid: 0x%x OX_ID: 0x%x", + tport->fcid, FNIC_STD_GET_OX_ID(fchdr)); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "reason code: 0x%x reason code explanation: 0x%x", + ba_rjt->rjt.br_reason, + ba_rjt->rjt.br_explan); + } + if ((tport->retry_counter < iport->max_plogi_retries) + && (fchdr->fh_r_ctl == FC_RCTL_BA_ACC)) { + fdls_free_oxid(iport, oxid, &tport->active_oxid); + fdls_send_tgt_plogi(iport, tport); + return; + } + + fdls_free_oxid(iport, oxid, &tport->active_oxid); + fdls_delete_tport(iport, tport); + /* Restart discovery of targets */ + if ((iport->state == FNIC_IPORT_STATE_READY) + && (iport->fabric.state != FDLS_STATE_SEND_GPNFT) + && (iport->fabric.state != FDLS_STATE_RSCN_GPN_FT)) { + fdls_send_gpn_ft(iport, FDLS_STATE_SEND_GPNFT); + } + break; + case FNIC_FRAME_TYPE_TGT_PRLI: + if (fchdr->fh_r_ctl == FC_RCTL_BA_ACC) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Received tgt PRLI abts response BA_ACC", + tport->fcid); + } else if (fchdr->fh_r_ctl == FC_RCTL_BA_RJT) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "PRLI BA_RJT received for tport_fcid: 0x%x OX_ID: 0x%x ", + tport->fcid, FNIC_STD_GET_OX_ID(fchdr)); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "reason code: 0x%x reason code explanation: 0x%x", + ba_rjt->rjt.br_reason, + ba_rjt->rjt.br_explan); + } + if ((tport->retry_counter < FDLS_RETRY_COUNT) + && (fchdr->fh_r_ctl == FC_RCTL_BA_ACC)) { + fdls_free_oxid(iport, oxid, &tport->active_oxid); + fdls_send_tgt_prli(iport, tport); + return; + } + fdls_free_oxid(iport, oxid, &tport->active_oxid); + fdls_send_tgt_plogi(iport, tport); /* go back to plogi */ + fdls_set_tport_state(tport, FDLS_TGT_STATE_PLOGI); + break; + default: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received ABTS response for unknown frame %p", iport); + break; + } + +} + +static void +fdls_process_plogi_req(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr) +{ + uint8_t *frame; + struct fc_std_els_rjt_rsp *pplogi_rsp; + uint16_t oxid; + uint32_t d_id = ntoh24(fchdr->fh_d_id); + struct fnic *fnic = iport->fnic; + uint16_t frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_els_rjt_rsp); + + if (iport->fcid != d_id) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received PLOGI with illegal frame bits. 
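/*
 * For illustration: target login is a PLOGI -> PRLI ladder, and the ABTS
 * completions above unwind it conservatively once retries run out: an
 * aborted PRLI falls back one rung to PLOGI, while an aborted PLOGI or
 * ADISC deletes the tport and lets GPN_FT rediscover it. A sketch:
 */
enum tgt_req { TGT_REQ_PLOGI, TGT_REQ_PRLI, TGT_REQ_ADISC };
enum tgt_next { TGT_RETRY_SAME, TGT_BACK_TO_PLOGI, TGT_DELETE };

static enum tgt_next tgt_abts_fallback(enum tgt_req req, int out_of_retries)
{
	if (!out_of_retries)
		return TGT_RETRY_SAME; /* resend the same request */
	return (req == TGT_REQ_PRLI) ? TGT_BACK_TO_PLOGI : TGT_DELETE;
}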
Dropping frame from 0x%x", + d_id); + return; + } + + if (iport->state != FNIC_IPORT_STATE_READY) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received PLOGI request in iport state: %d Dropping frame", + iport->state); + return; + } + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send response to PLOGI request"); + return; + } + + pplogi_rsp = (struct fc_std_els_rjt_rsp *) (frame + FNIC_ETH_FCOE_HDRS_OFFSET); + fdls_init_els_rjt_frame(frame, iport); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: Process PLOGI request from SID: 0x%x", + iport->fcid, ntoh24(fchdr->fh_s_id)); + + /* We don't support PLOGI request, send a reject */ + pplogi_rsp->rej.er_reason = 0x0B; + pplogi_rsp->rej.er_explan = 0x0; + pplogi_rsp->rej.er_vendor = 0x0; + + FNIC_STD_SET_S_ID(pplogi_rsp->fchdr, fchdr->fh_d_id); + FNIC_STD_SET_D_ID(pplogi_rsp->fchdr, fchdr->fh_s_id); + oxid = FNIC_STD_GET_OX_ID(fchdr); + FNIC_STD_SET_OX_ID(pplogi_rsp->fchdr, oxid); + + fnic_send_fcoe_frame(iport, frame, frame_size); +} + +static void +fdls_process_logo_req(struct fnic_iport_s *iport, struct fc_frame_header *fchdr) +{ + struct fc_std_logo *logo = (struct fc_std_logo *)fchdr; + uint32_t nport_id; + uint64_t nport_name; + struct fnic_tport_s *tport; + struct fnic *fnic = iport->fnic; + uint16_t oxid; + + nport_id = ntoh24(logo->els.fl_n_port_id); + nport_name = be64_to_cpu(logo->els.fl_n_port_wwn); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Process LOGO request from fcid: 0x%x", nport_id); + + if (iport->state != FNIC_IPORT_STATE_READY) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Dropping LOGO req from 0x%x in iport state: %d", + nport_id, iport->state); + return; + } + + tport = fnic_find_tport_by_fcid(iport, nport_id); + + if (!tport) { + /* We are not logged in with the nport, log and drop... 
*/ + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Received LOGO from an nport not logged in: 0x%x(0x%llx)", + nport_id, nport_name); + return; + } + if (tport->fcid != nport_id) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Received LOGO with invalid target port fcid: 0x%x(0x%llx)", + nport_id, nport_name); + return; + } + if (tport->timer_pending) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "tport fcid 0x%x: Canceling disc timer\n", + tport->fcid); + fnic_del_tport_timer_sync(fnic, tport); + tport->timer_pending = 0; + } + + /* got a logo in response to adisc to a target which has logged out */ + if (tport->state == FDLS_TGT_STATE_ADISC) { + tport->retry_counter = 0; + oxid = tport->active_oxid; + fdls_free_oxid(iport, oxid, &tport->active_oxid); + fdls_delete_tport(iport, tport); + fdls_send_logo_resp(iport, &logo->fchdr); + if ((iport->state == FNIC_IPORT_STATE_READY) + && (fdls_get_state(&iport->fabric) != FDLS_STATE_SEND_GPNFT) + && (fdls_get_state(&iport->fabric) != FDLS_STATE_RSCN_GPN_FT)) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Sending GPNFT in response to LOGO from Target:0x%x", + nport_id); + fdls_send_gpn_ft(iport, FDLS_STATE_SEND_GPNFT); + return; + } + } else { + fdls_delete_tport(iport, tport); + } + if (iport->state == FNIC_IPORT_STATE_READY) { + fdls_send_logo_resp(iport, &logo->fchdr); + if ((fdls_get_state(&iport->fabric) != FDLS_STATE_SEND_GPNFT) && + (fdls_get_state(&iport->fabric) != FDLS_STATE_RSCN_GPN_FT)) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Sending GPNFT in response to LOGO from Target:0x%x", + nport_id); + fdls_send_gpn_ft(iport, FDLS_STATE_SEND_GPNFT); + } + } +} + +static void +fdls_process_rscn(struct fnic_iport_s *iport, struct fc_frame_header *fchdr) +{ + struct fc_std_rscn *rscn; + struct fc_els_rscn_page *rscn_port = NULL; + int num_ports; + struct fnic_tport_s *tport, *next; + uint32_t nport_id; + uint8_t fcid[3]; + int newports = 0; + struct fnic_fdls_fabric_s *fdls = &iport->fabric; + struct fnic *fnic = iport->fnic; + int rscn_type = NOT_PC_RSCN; + uint32_t sid = ntoh24(fchdr->fh_s_id); + unsigned long reset_fnic_list_lock_flags = 0; + uint16_t rscn_payload_len; + + atomic64_inc(&iport->iport_stats.num_rscns); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FDLS process RSCN %p", iport); + + if (iport->state != FNIC_IPORT_STATE_READY) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FDLS RSCN received in state(%d). Dropping", + fdls_get_state(fdls)); + return; + } + + rscn = (struct fc_std_rscn *)fchdr; + rscn_payload_len = be16_to_cpu(rscn->els.rscn_plen); + + /* frame validation */ + if ((rscn_payload_len % 4 != 0) || (rscn_payload_len < 8) + || (rscn_payload_len > 1024) + || (rscn->els.rscn_page_len != 4)) { + num_ports = 0; + if ((rscn_payload_len == 0xFFFF) + && (sid == FC_FID_FCTRL)) { + rscn_type = PC_RSCN; + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "pcrscn: PCRSCN received. sid: 0x%x payload len: 0x%x", + sid, rscn_payload_len); + } else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "RSCN payload_len: 0x%x page_len: 0x%x", + rscn_payload_len, rscn->els.rscn_page_len); + /* if this happens then we need to send ADISC to all the tports. 
*/ + list_for_each_entry_safe(tport, next, &iport->tport_list, links) { + if (tport->state == FDLS_TGT_STATE_READY) + tport->flags |= FNIC_FDLS_TPORT_SEND_ADISC; + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "RSCN for port id: 0x%x", tport->fcid); + } + } /* end else */ + } else { + num_ports = (rscn_payload_len - 4) / rscn->els.rscn_page_len; + rscn_port = (struct fc_els_rscn_page *)(rscn + 1); + } + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "RSCN received for num_ports: %d payload_len: %d page_len: %d ", + num_ports, rscn_payload_len, rscn->els.rscn_page_len); + + /* + * RSCN have at least one Port_ID page , but may not have any port_id + * in it. If no port_id is specified in the Port_ID page , we send + * ADISC to all the tports + */ + + while (num_ports) { + + memcpy(fcid, rscn_port->rscn_fid, 3); + + nport_id = ntoh24(fcid); + rscn_port++; + num_ports--; + /* if this happens then we need to send ADISC to all the tports. */ + if (nport_id == 0) { + list_for_each_entry_safe(tport, next, &iport->tport_list, + links) { + if (tport->state == FDLS_TGT_STATE_READY) + tport->flags |= FNIC_FDLS_TPORT_SEND_ADISC; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "RSCN for port id: 0x%x", tport->fcid); + } + break; + } + tport = fnic_find_tport_by_fcid(iport, nport_id); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "RSCN port id list: 0x%x", nport_id); + + if (!tport) { + newports++; + continue; + } + if (tport->state == FDLS_TGT_STATE_READY) + tport->flags |= FNIC_FDLS_TPORT_SEND_ADISC; + } + + if (pc_rscn_handling_feature_flag == PC_RSCN_HANDLING_FEATURE_ON && + rscn_type == PC_RSCN && fnic->role == FNIC_ROLE_FCP_INITIATOR) { + + if (fnic->pc_rscn_handling_status == PC_RSCN_HANDLING_IN_PROGRESS) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "PCRSCN handling already in progress. Skip host reset: %d", + iport->fnic->fnic_num); + return; + } + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Processing PCRSCN. 
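/*
 * For illustration: a well-formed RSCN is a 4-byte header followed by
 * 4-byte address pages, while the Cisco port-channel RSCN is signalled
 * by a 0xffff payload length from the fabric controller (0xfffffd), as
 * handled above. A standalone classifier under those rules:
 */
#include <stdint.h>

enum rscn_kind { RSCN_MALFORMED, RSCN_NORMAL, RSCN_PC };

static enum rscn_kind classify_rscn(uint16_t plen, uint8_t page_len, uint32_t sid)
{
	if (plen == 0xffff && sid == 0xfffffd)
		return RSCN_PC;        /* PC-RSCN: queue a host reset */
	if (plen % 4 || plen < 8 || plen > 1024 || page_len != 4)
		return RSCN_MALFORMED; /* ADISC-verify every ready tport */
	return RSCN_NORMAL;            /* (plen - 4) / page_len pages follow */
}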
Queuing fnic for host reset: %d", + iport->fnic->fnic_num); + fnic->pc_rscn_handling_status = PC_RSCN_HANDLING_IN_PROGRESS; + + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + + spin_lock_irqsave(&reset_fnic_list_lock, + reset_fnic_list_lock_flags); + list_add_tail(&fnic->links, &reset_fnic_list); + spin_unlock_irqrestore(&reset_fnic_list_lock, + reset_fnic_list_lock_flags); + + queue_work(reset_fnic_work_queue, &reset_fnic_work); + spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); + } else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FDLS process RSCN sending GPN_FT: newports: %d", newports); + fdls_send_gpn_ft(iport, FDLS_STATE_RSCN_GPN_FT); + fdls_send_rscn_resp(iport, fchdr); + } +} + +void fnic_fdls_disc_start(struct fnic_iport_s *iport) +{ + struct fnic *fnic = iport->fnic; + + fc_host_fabric_name(iport->fnic->host) = 0; + fc_host_post_event(iport->fnic->host, fc_get_event_number(), + FCH_EVT_LIPRESET, 0); + + if (!iport->usefip) { + if (iport->flags & FNIC_FIRST_LINK_UP) { + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + fnic_scsi_fcpio_reset(iport->fnic); + spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); + + iport->flags &= ~FNIC_FIRST_LINK_UP; + } + fnic_fdls_start_flogi(iport); + } else + fnic_fdls_start_plogi(iport); +} + +static void +fdls_process_adisc_req(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr) +{ + struct fc_std_els_adisc *padisc_acc; + struct fc_std_els_adisc *adisc_req = (struct fc_std_els_adisc *)fchdr; + uint64_t frame_wwnn; + uint64_t frame_wwpn; + uint32_t tgt_fcid; + struct fnic_tport_s *tport; + uint8_t *fcid; + uint8_t *rjt_frame; + uint8_t *acc_frame; + struct fc_std_els_rjt_rsp *prjts_rsp; + uint16_t oxid; + struct fnic *fnic = iport->fnic; + uint16_t rjt_frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_els_rjt_rsp); + uint16_t acc_frame_size = FNIC_ETH_FCOE_HDRS_OFFSET + + sizeof(struct fc_std_els_adisc); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Process ADISC request %d", iport->fnic->fnic_num); + + fcid = FNIC_STD_GET_S_ID(fchdr); + tgt_fcid = ntoh24(fcid); + tport = fnic_find_tport_by_fcid(iport, tgt_fcid); + if (!tport) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "tport for fcid: 0x%x not found. Dropping ADISC req.", + tgt_fcid); + return; + } + if (iport->state != FNIC_IPORT_STATE_READY) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Dropping ADISC req from fcid: 0x%x in iport state: %d", + tgt_fcid, iport->state); + return; + } + + frame_wwnn = be64_to_cpu(adisc_req->els.adisc_wwnn); + frame_wwpn = be64_to_cpu(adisc_req->els.adisc_wwpn); + + if ((frame_wwnn != tport->wwnn) || (frame_wwpn != tport->wwpn)) { + /* send reject */ + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "ADISC req from fcid: 0x%x mismatch wwpn: 0x%llx wwnn: 0x%llx", + tgt_fcid, frame_wwpn, frame_wwnn); + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "local tport wwpn: 0x%llx wwnn: 0x%llx. 
Sending RJT", + tport->wwpn, tport->wwnn); + + rjt_frame = fdls_alloc_frame(iport); + if (rjt_frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate rjt_frame to send response to ADISC request"); + return; + } + + prjts_rsp = (struct fc_std_els_rjt_rsp *) (rjt_frame + FNIC_ETH_FCOE_HDRS_OFFSET); + fdls_init_els_rjt_frame(rjt_frame, iport); + + prjts_rsp->rej.er_reason = 0x03; /* logical error */ + prjts_rsp->rej.er_explan = 0x1E; /* N_port login required */ + prjts_rsp->rej.er_vendor = 0x0; + + FNIC_STD_SET_S_ID(prjts_rsp->fchdr, fchdr->fh_d_id); + FNIC_STD_SET_D_ID(prjts_rsp->fchdr, fchdr->fh_s_id); + oxid = FNIC_STD_GET_OX_ID(fchdr); + FNIC_STD_SET_OX_ID(prjts_rsp->fchdr, oxid); + + fnic_send_fcoe_frame(iport, rjt_frame, rjt_frame_size); + return; + } + + acc_frame = fdls_alloc_frame(iport); + if (acc_frame == NULL) { + FNIC_FCS_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send ADISC accept"); + return; + } + + padisc_acc = (struct fc_std_els_adisc *) (acc_frame + FNIC_ETH_FCOE_HDRS_OFFSET); + + FNIC_STD_SET_S_ID(padisc_acc->fchdr, fchdr->fh_d_id); + FNIC_STD_SET_D_ID(padisc_acc->fchdr, fchdr->fh_s_id); + + FNIC_STD_SET_F_CTL(padisc_acc->fchdr, FNIC_ELS_REP_FCTL << 16); + FNIC_STD_SET_R_CTL(padisc_acc->fchdr, FC_RCTL_ELS_REP); + FNIC_STD_SET_TYPE(padisc_acc->fchdr, FC_TYPE_ELS); + + oxid = FNIC_STD_GET_OX_ID(fchdr); + FNIC_STD_SET_OX_ID(padisc_acc->fchdr, oxid); + FNIC_STD_SET_RX_ID(padisc_acc->fchdr, FNIC_UNASSIGNED_RXID); + + padisc_acc->els.adisc_cmd = ELS_LS_ACC; + + FNIC_STD_SET_NPORT_NAME(&padisc_acc->els.adisc_wwpn, + iport->wwpn); + FNIC_STD_SET_NODE_NAME(&padisc_acc->els.adisc_wwnn, + iport->wwnn); + memcpy(padisc_acc->els.adisc_port_id, fchdr->fh_d_id, 3); + + fnic_send_fcoe_frame(iport, acc_frame, acc_frame_size); +} + +/* + * Performs a validation for all FCOE frames and return the frame type + */ +int +fnic_fdls_validate_and_get_frame_type(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr) +{ + uint8_t type; + uint8_t *fc_payload; + uint16_t oxid; + uint32_t s_id; + uint32_t d_id; + struct fnic *fnic = iport->fnic; + struct fnic_fdls_fabric_s *fabric = &iport->fabric; + int oxid_frame_type; + + oxid = FNIC_STD_GET_OX_ID(fchdr); + fc_payload = (uint8_t *) fchdr + sizeof(struct fc_frame_header); + type = *fc_payload; + s_id = ntoh24(fchdr->fh_s_id); + d_id = ntoh24(fchdr->fh_d_id); + + /* some common validation */ + if (fdls_get_state(fabric) > FDLS_STATE_FABRIC_FLOGI) { + if ((iport->fcid != d_id) || (!FNIC_FC_FRAME_CS_CTL(fchdr))) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "invalid frame received. Dropping frame"); + return -1; + } + } + + /* BLS ABTS response */ + if ((fchdr->fh_r_ctl == FC_RCTL_BA_ACC) + || (fchdr->fh_r_ctl == FC_RCTL_BA_RJT)) { + if (!(FNIC_FC_FRAME_TYPE_BLS(fchdr))) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received ABTS invalid frame. Dropping frame"); + return -1; + + } + if (fdls_is_oxid_fabric_req(oxid)) { + if (!(iport->fabric.flags & FNIC_FDLS_FABRIC_ABORT_ISSUED)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received unexpected ABTS RSP(oxid:0x%x) from 0x%x. Dropping frame", + oxid, s_id); + return -1; + } + return FNIC_FABRIC_BLS_ABTS_RSP; + } else if (fdls_is_oxid_fdmi_req(oxid)) { + return FNIC_FDMI_BLS_ABTS_RSP; + } else if (fdls_is_oxid_tgt_req(oxid)) { + return FNIC_TPORT_BLS_ABTS_RSP; + } + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received ABTS rsp with unknown oxid(0x%x) from 0x%x. 
Dropping frame", + oxid, s_id); + return -1; + } + + /* BLS ABTS Req */ + if ((fchdr->fh_r_ctl == FC_RCTL_BA_ABTS) + && (FNIC_FC_FRAME_TYPE_BLS(fchdr))) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Receiving Abort Request from s_id: 0x%x", s_id); + return FNIC_BLS_ABTS_REQ; + } + + /* unsolicited requests frames */ + if (FNIC_FC_FRAME_UNSOLICITED(fchdr)) { + switch (type) { + case ELS_LOGO: + if ((!FNIC_FC_FRAME_FCTL_FIRST_LAST_SEQINIT(fchdr)) + || (!FNIC_FC_FRAME_UNSOLICITED(fchdr)) + || (!FNIC_FC_FRAME_TYPE_ELS(fchdr))) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received LOGO invalid frame. Dropping frame"); + return -1; + } + return FNIC_ELS_LOGO_REQ; + case ELS_RSCN: + if ((!FNIC_FC_FRAME_FCTL_FIRST_LAST_SEQINIT(fchdr)) + || (!FNIC_FC_FRAME_TYPE_ELS(fchdr)) + || (!FNIC_FC_FRAME_UNSOLICITED(fchdr))) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received RSCN invalid FCTL. Dropping frame"); + return -1; + } + if (s_id != FC_FID_FCTRL) + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received RSCN from target FCTL: 0x%x type: 0x%x s_id: 0x%x.", + fchdr->fh_f_ctl[0], fchdr->fh_type, s_id); + return FNIC_ELS_RSCN_REQ; + case ELS_PLOGI: + return FNIC_ELS_PLOGI_REQ; + case ELS_ECHO: + return FNIC_ELS_ECHO_REQ; + case ELS_ADISC: + return FNIC_ELS_ADISC; + case ELS_RLS: + return FNIC_ELS_RLS; + case ELS_RRQ: + return FNIC_ELS_RRQ; + default: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Unsupported frame (type:0x%02x) from fcid: 0x%x", + type, s_id); + return FNIC_ELS_UNSUPPORTED_REQ; + } + } + + /* solicited response from fabric or target */ + oxid_frame_type = FNIC_FRAME_TYPE(oxid); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "oxid frame code: 0x%x, oxid: 0x%x\n", oxid_frame_type, oxid); + switch (oxid_frame_type) { + case FNIC_FRAME_TYPE_FABRIC_FLOGI: + if (type == ELS_LS_ACC) { + if ((s_id != FC_FID_FLOGI) + || (!FNIC_FC_FRAME_TYPE_ELS(fchdr))) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received unknown frame. Dropping frame"); + return -1; + } + } + return FNIC_FABRIC_FLOGI_RSP; + + case FNIC_FRAME_TYPE_FABRIC_PLOGI: + if (type == ELS_LS_ACC) { + if ((s_id != FC_FID_DIR_SERV) + || (!FNIC_FC_FRAME_TYPE_ELS(fchdr))) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received unknown frame. Dropping frame"); + return -1; + } + } + return FNIC_FABRIC_PLOGI_RSP; + + case FNIC_FRAME_TYPE_FABRIC_SCR: + if (type == ELS_LS_ACC) { + if ((s_id != FC_FID_FCTRL) + || (!FNIC_FC_FRAME_TYPE_ELS(fchdr))) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received unknown frame. Dropping frame"); + return -1; + } + } + return FNIC_FABRIC_SCR_RSP; + + case FNIC_FRAME_TYPE_FABRIC_RPN: + if ((s_id != FC_FID_DIR_SERV) || (!FNIC_FC_FRAME_TYPE_FC_GS(fchdr))) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received unknown frame. Dropping frame"); + return -1; + } + return FNIC_FABRIC_RPN_RSP; + + case FNIC_FRAME_TYPE_FABRIC_RFT: + if ((s_id != FC_FID_DIR_SERV) || (!FNIC_FC_FRAME_TYPE_FC_GS(fchdr))) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received unknown frame. Dropping frame"); + return -1; + } + return FNIC_FABRIC_RFT_RSP; + + case FNIC_FRAME_TYPE_FABRIC_RFF: + if ((s_id != FC_FID_DIR_SERV) || (!FNIC_FC_FRAME_TYPE_FC_GS(fchdr))) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received unknown frame. 
Dropping frame"); + return -1; + } + return FNIC_FABRIC_RFF_RSP; + + case FNIC_FRAME_TYPE_FABRIC_GPN_FT: + if ((s_id != FC_FID_DIR_SERV) || (!FNIC_FC_FRAME_TYPE_FC_GS(fchdr))) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received unknown frame. Dropping frame"); + return -1; + } + return FNIC_FABRIC_GPN_FT_RSP; + + case FNIC_FRAME_TYPE_FABRIC_LOGO: + return FNIC_FABRIC_LOGO_RSP; + case FNIC_FRAME_TYPE_FDMI_PLOGI: + return FNIC_FDMI_PLOGI_RSP; + case FNIC_FRAME_TYPE_FDMI_RHBA: + return FNIC_FDMI_REG_HBA_RSP; + case FNIC_FRAME_TYPE_FDMI_RPA: + return FNIC_FDMI_RPA_RSP; + case FNIC_FRAME_TYPE_TGT_PLOGI: + return FNIC_TPORT_PLOGI_RSP; + case FNIC_FRAME_TYPE_TGT_PRLI: + return FNIC_TPORT_PRLI_RSP; + case FNIC_FRAME_TYPE_TGT_ADISC: + return FNIC_TPORT_ADISC_RSP; + case FNIC_FRAME_TYPE_TGT_LOGO: + if (!FNIC_FC_FRAME_TYPE_ELS(fchdr)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Dropping Unknown frame in tport solicited exchange range type: 0x%x.", + fchdr->fh_type); + return -1; + } + return FNIC_TPORT_LOGO_RSP; + default: + /* Drop the Rx frame and log/stats it */ + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Solicited response: unknown OXID: 0x%x", oxid); + return -1; + } + + return -1; +} + +void fnic_fdls_recv_frame(struct fnic_iport_s *iport, void *rx_frame, + int len, int fchdr_offset) +{ + struct fc_frame_header *fchdr; + uint32_t s_id = 0; + uint32_t d_id = 0; + struct fnic *fnic = iport->fnic; + int frame_type; + + fchdr = (struct fc_frame_header *) ((uint8_t *) rx_frame + fchdr_offset); + s_id = ntoh24(fchdr->fh_s_id); + d_id = ntoh24(fchdr->fh_d_id); + + fnic_debug_dump_fc_frame(fnic, fchdr, len, "Incoming"); + + frame_type = + fnic_fdls_validate_and_get_frame_type(iport, fchdr); + + /*if we are in flogo drop everything else */ + if (iport->fabric.state == FDLS_STATE_FABRIC_LOGO && + frame_type != FNIC_FABRIC_LOGO_RSP) + return; + + switch (frame_type) { + case FNIC_FABRIC_FLOGI_RSP: + fdls_process_flogi_rsp(iport, fchdr, rx_frame); + break; + case FNIC_FABRIC_PLOGI_RSP: + fdls_process_fabric_plogi_rsp(iport, fchdr); + break; + case FNIC_FDMI_PLOGI_RSP: + fdls_process_fdmi_plogi_rsp(iport, fchdr); + break; + case FNIC_FABRIC_RPN_RSP: + fdls_process_rpn_id_rsp(iport, fchdr); + break; + case FNIC_FABRIC_RFT_RSP: + fdls_process_rft_id_rsp(iport, fchdr); + break; + case FNIC_FABRIC_RFF_RSP: + fdls_process_rff_id_rsp(iport, fchdr); + break; + case FNIC_FABRIC_SCR_RSP: + fdls_process_scr_rsp(iport, fchdr); + break; + case FNIC_FABRIC_GPN_FT_RSP: + fdls_process_gpn_ft_rsp(iport, fchdr, len); + break; + case FNIC_TPORT_PLOGI_RSP: + fdls_process_tgt_plogi_rsp(iport, fchdr); + break; + case FNIC_TPORT_PRLI_RSP: + fdls_process_tgt_prli_rsp(iport, fchdr); + break; + case FNIC_TPORT_ADISC_RSP: + fdls_process_tgt_adisc_rsp(iport, fchdr); + break; + case FNIC_TPORT_BLS_ABTS_RSP: + fdls_process_tgt_abts_rsp(iport, fchdr); + break; + case FNIC_TPORT_LOGO_RSP: + /* Logo response from tgt which we have deleted */ + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Logo response from tgt: 0x%x", + ntoh24(fchdr->fh_s_id)); + break; + case FNIC_FABRIC_LOGO_RSP: + fdls_process_fabric_logo_rsp(iport, fchdr); + break; + case FNIC_FABRIC_BLS_ABTS_RSP: + fdls_process_fabric_abts_rsp(iport, fchdr); + break; + case FNIC_FDMI_BLS_ABTS_RSP: + fdls_process_fdmi_abts_rsp(iport, fchdr); + break; + case FNIC_BLS_ABTS_REQ: + fdls_process_abts_req(iport, fchdr); + break; + case FNIC_ELS_UNSUPPORTED_REQ: + fdls_process_unsupported_els_req(iport, fchdr); + break; + case 
FNIC_ELS_PLOGI_REQ: + fdls_process_plogi_req(iport, fchdr); + break; + case FNIC_ELS_RSCN_REQ: + fdls_process_rscn(iport, fchdr); + break; + case FNIC_ELS_LOGO_REQ: + fdls_process_logo_req(iport, fchdr); + break; + case FNIC_ELS_RRQ: + case FNIC_ELS_ECHO_REQ: + fdls_process_els_req(iport, fchdr, len); + break; + case FNIC_ELS_ADISC: + fdls_process_adisc_req(iport, fchdr); + break; + case FNIC_ELS_RLS: + fdls_process_rls_req(iport, fchdr); + break; + case FNIC_FDMI_REG_HBA_RSP: + case FNIC_FDMI_RPA_RSP: + fdls_process_fdmi_reg_ack(iport, fchdr, frame_type); + break; + default: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "s_id: 0x%x d_did: 0x%x", s_id, d_id); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Received unknown FCoE frame of len: %d. Dropping frame", len); + break; + } +} + +void fnic_fdls_disc_init(struct fnic_iport_s *iport) +{ + fdls_reset_oxid_pool(iport); + fdls_set_state((&iport->fabric), FDLS_STATE_INIT); +} + +void fnic_fdls_link_down(struct fnic_iport_s *iport) +{ + struct fnic_tport_s *tport, *next; + struct fnic *fnic = iport->fnic; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS processing link down", iport->fcid); + + fdls_set_state((&iport->fabric), FDLS_STATE_LINKDOWN); + iport->fabric.flags = 0; + + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + fnic_scsi_fcpio_reset(iport->fnic); + spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); + list_for_each_entry_safe(tport, next, &iport->tport_list, links) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "removing rport: 0x%x", tport->fcid); + fdls_delete_tport(iport, tport); + } + + if ((fnic_fdmi_support == 1) && (iport->fabric.fdmi_pending > 0)) { + del_timer_sync(&iport->fabric.fdmi_timer); + iport->fabric.fdmi_pending = 0; + } + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "0x%x: FDLS finish processing link down", iport->fcid); +} diff --git a/drivers/scsi/fnic/fdls_fc.h b/drivers/scsi/fnic/fdls_fc.h new file mode 100644 index 000000000000..012f43afd083 --- /dev/null +++ b/drivers/scsi/fnic/fdls_fc.h @@ -0,0 +1,253 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + */ + +#ifndef _FDLS_FC_H_ +#define _FDLS_FC_H_ + +/* This file contains the declarations for FC fabric services + * and target discovery + * + * Request and Response for + * 1. FLOGI + * 2. PLOGI to Fabric Controller + * 3. GPN_ID, GPN_FT + * 4. RSCN + * 5. PLOGI to Target + * 6. 
PRLI to Target + */ + +#include <scsi/scsi.h> +#include <scsi/fc/fc_els.h> +#include <uapi/scsi/fc/fc_fs.h> +#include <uapi/scsi/fc/fc_ns.h> +#include <uapi/scsi/fc/fc_gs.h> +#include <uapi/linux/if_ether.h> +#include <scsi/fc/fc_ms.h> +#include <linux/minmax.h> +#include <linux/if_ether.h> +#include <scsi/fc/fc_encaps.h> +#include <scsi/fc/fc_fcoe.h> + +#define FDLS_MIN_FRAMES (32) +#define FDLS_MIN_FRAME_ELEM (4) +#define FNIC_FCP_SP_RD_XRDY_DIS 0x00000002 +#define FNIC_FCP_SP_TARGET 0x00000010 +#define FNIC_FCP_SP_INITIATOR 0x00000020 +#define FNIC_FCP_SP_CONF_CMPL 0x00000080 +#define FNIC_FCP_SP_RETRY 0x00000100 + +#define FNIC_FC_CONCUR_SEQS (0xFF) +#define FNIC_FC_RO_INFO (0x1F) + +/* Little Endian */ +#define FNIC_UNASSIGNED_OXID (0xffff) +#define FNIC_UNASSIGNED_RXID (0xffff) +#define FNIC_ELS_REQ_FCTL (0x000029) +#define FNIC_ELS_REP_FCTL (0x000099) + +#define FNIC_FCP_RSP_FCTL (0x000099) +#define FNIC_REQ_ABTS_FCTL (0x000009) + +#define FNIC_FC_PH_VER_HI (0x20) +#define FNIC_FC_PH_VER_LO (0x20) +#define FNIC_FC_PH_VER (0x2020) +#define FNIC_FC_B2B_CREDIT (0x0A) +#define FNIC_FC_B2B_RDF_SZ (0x0800) + +#define FNIC_LOGI_RDF_SIZE(_logi) ((_logi).fl_csp.sp_bb_data) +#define FNIC_LOGI_R_A_TOV(_logi) ((_logi).fl_csp.sp_r_a_tov) +#define FNIC_LOGI_E_D_TOV(_logi) ((_logi).fl_csp.sp_e_d_tov) +#define FNIC_LOGI_FEATURES(_logi) (be16_to_cpu((_logi).fl_csp.sp_features)) +#define FNIC_LOGI_PORT_NAME(_logi) ((_logi).fl_wwpn) +#define FNIC_LOGI_NODE_NAME(_logi) ((_logi).fl_wwnn) + +#define FNIC_LOGI_SET_RDF_SIZE(_logi, _rdf_size) \ + (FNIC_LOGI_RDF_SIZE(_logi) = cpu_to_be16(_rdf_size)) +#define FNIC_LOGI_SET_E_D_TOV(_logi, _e_d_tov) \ + (FNIC_LOGI_E_D_TOV(_logi) = cpu_to_be32(_e_d_tov)) +#define FNIC_LOGI_SET_R_A_TOV(_logi, _r_a_tov) \ + (FNIC_LOGI_R_A_TOV(_logi) = cpu_to_be32(_r_a_tov)) + +#define FNIC_STD_SET_S_ID(_fchdr, _sid) memcpy((_fchdr).fh_s_id, _sid, 3) +#define FNIC_STD_SET_D_ID(_fchdr, _did) memcpy((_fchdr).fh_d_id, _did, 3) +#define FNIC_STD_SET_OX_ID(_fchdr, _oxid) ((_fchdr).fh_ox_id = cpu_to_be16(_oxid)) +#define FNIC_STD_SET_RX_ID(_fchdr, _rxid) ((_fchdr).fh_rx_id = cpu_to_be16(_rxid)) + +#define FNIC_STD_SET_R_CTL(_fchdr, _rctl) ((_fchdr).fh_r_ctl = _rctl) +#define FNIC_STD_SET_TYPE(_fchdr, _type) ((_fchdr).fh_type = _type) +#define FNIC_STD_SET_F_CTL(_fchdr, _fctl) \ + put_unaligned_be24(_fctl, &((_fchdr).fh_f_ctl)) + +#define FNIC_STD_SET_NPORT_NAME(_ptr, _wwpn) put_unaligned_be64(_wwpn, _ptr) +#define FNIC_STD_SET_NODE_NAME(_ptr, _wwnn) put_unaligned_be64(_wwnn, _ptr) +#define FNIC_STD_SET_PORT_ID(__req, __portid) \ + memcpy(__req.fr_fid.fp_fid, __portid, 3) +#define FNIC_STD_SET_PORT_NAME(_req, _pName) \ + (put_unaligned_be64(_pName, &_req.fr_wwn)) + +#define FNIC_STD_GET_OX_ID(_fchdr) (be16_to_cpu((_fchdr)->fh_ox_id)) +#define FNIC_STD_GET_RX_ID(_fchdr) (be16_to_cpu((_fchdr)->fh_rx_id)) +#define FNIC_STD_GET_S_ID(_fchdr) ((_fchdr)->fh_s_id) +#define FNIC_STD_GET_D_ID(_fchdr) ((_fchdr)->fh_d_id) +#define FNIC_STD_GET_TYPE(_fchdr) ((_fchdr)->fh_type) +#define FNIC_STD_GET_F_CTL(_fchdr) ((_fchdr)->fh_f_ctl) +#define FNIC_STD_GET_R_CTL(_fchdr) ((_fchdr)->fh_r_ctl) + +#define FNIC_STD_GET_FC_CT_CMD(__fcct_hdr) (be16_to_cpu(__fcct_hdr->ct_cmd)) + +#define FNIC_FCOE_MAX_FRAME_SZ (2048) +#define FNIC_FCOE_MIN_FRAME_SZ (280) +#define FNIC_FC_MAX_PAYLOAD_LEN (2048) +#define FNIC_MIN_DATA_FIELD_SIZE (256) + +#define FNIC_FC_EDTOV_NSEC (0x400) +#define FNIC_NSEC_TO_MSEC (0x1000000) +#define FCP_PRLI_FUNC_TARGET (0x0010) + +#define FNIC_FC_R_CTL_SOLICITED_DATA (0x21) +#define 
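/*
 * For illustration: the F_CTL constants above name the top byte of the
 * 24-bit field. 0x29 (ELS request) is First_Sequence | End_Sequence |
 * Sequence_Initiative; 0x99 (ELS reply / FCP RSP) additionally sets
 * Exchange_Context (responder) and Last_Sequence. Bit names per FC-FS:
 */
#include <stdint.h>

#define FCTL_HI_EXCH_RESPONDER 0x80
#define FCTL_HI_FIRST_SEQ      0x20
#define FCTL_HI_LAST_SEQ       0x10
#define FCTL_HI_END_SEQ        0x08
#define FCTL_HI_SEQ_INITIATIVE 0x01

static uint8_t fctl_hi_els_request(void)
{
	return FCTL_HI_FIRST_SEQ | FCTL_HI_END_SEQ | FCTL_HI_SEQ_INITIATIVE; /* 0x29 */
}

static uint8_t fctl_hi_els_reply(void)
{
	return FCTL_HI_EXCH_RESPONDER | FCTL_HI_LAST_SEQ |
	       FCTL_HI_END_SEQ | FCTL_HI_SEQ_INITIATIVE; /* 0x99 */
}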
FNIC_FC_F_CTL_LAST_END_SEQ (0x98) +#define FNIC_FC_F_CTL_LAST_END_SEQ_INT (0x99) +#define FNIC_FC_F_CTL_FIRST_LAST_SEQINIT (0x29) +#define FNIC_FC_R_CTL_FC4_SCTL (0x03) +#define FNIC_FC_CS_CTL (0x00) + +#define FNIC_FC_FRAME_UNSOLICITED(_fchdr) \ + (_fchdr->fh_r_ctl == FC_RCTL_ELS_REQ) +#define FNIC_FC_FRAME_SOLICITED_DATA(_fchdr) \ + (_fchdr->fh_r_ctl == FNIC_FC_R_CTL_SOLICITED_DATA) +#define FNIC_FC_FRAME_SOLICITED_CTRL_REPLY(_fchdr) \ + (_fchdr->fh_r_ctl == FC_RCTL_ELS_REP) +#define FNIC_FC_FRAME_FCTL_LAST_END_SEQ(_fchdr) \ + (_fchdr->fh_f_ctl[0] == FNIC_FC_F_CTL_LAST_END_SEQ) +#define FNIC_FC_FRAME_FCTL_LAST_END_SEQ_INT(_fchdr) \ + (_fchdr->fh_f_ctl[0] == FNIC_FC_F_CTL_LAST_END_SEQ_INT) +#define FNIC_FC_FRAME_FCTL_FIRST_LAST_SEQINIT(_fchdr) \ + (_fchdr->fh_f_ctl[0] == FNIC_FC_F_CTL_FIRST_LAST_SEQINIT) +#define FNIC_FC_FRAME_FC4_SCTL(_fchdr) \ + (_fchdr->fh_r_ctl == FNIC_FC_R_CTL_FC4_SCTL) +#define FNIC_FC_FRAME_TYPE_BLS(_fchdr) (_fchdr->fh_type == FC_TYPE_BLS) +#define FNIC_FC_FRAME_TYPE_ELS(_fchdr) (_fchdr->fh_type == FC_TYPE_ELS) +#define FNIC_FC_FRAME_TYPE_FC_GS(_fchdr) (_fchdr->fh_type == FC_TYPE_CT) +#define FNIC_FC_FRAME_CS_CTL(_fchdr) (_fchdr->fh_cs_ctl == FNIC_FC_CS_CTL) + +#define FNIC_FC_C3_RDF (0xfff) +#define FNIC_FC_PLOGI_RSP_RDF(_plogi_rsp) \ + (min(_plogi_rsp->u.csp_plogi.b2b_rdf_size, \ + (_plogi_rsp->spc3[4] & FNIC_FC_C3_RDF))) +#define FNIC_FC_PLOGI_RSP_CONCUR_SEQ(_plogi_rsp) \ + (min((uint16_t) (be16_to_cpu(_plogi_rsp->els.fl_csp.sp_tot_seq)), \ + (uint16_t) (be16_to_cpu(_plogi_rsp->els.fl_cssp[2].cp_con_seq) & 0xff))) + +/* FLOGI/PLOGI struct */ +struct fc_std_flogi { + struct fc_frame_header fchdr; + struct fc_els_flogi els; +} __packed; + +struct fc_std_els_acc_rsp { + struct fc_frame_header fchdr; + struct fc_els_ls_acc acc; +} __packed; + +struct fc_std_els_rjt_rsp { + struct fc_frame_header fchdr; + struct fc_els_ls_rjt rej; +} __packed; + +struct fc_std_els_adisc { + struct fc_frame_header fchdr; + struct fc_els_adisc els; +} __packed; + +struct fc_std_rls_acc { + struct fc_frame_header fchdr; + struct fc_els_rls_resp els; +} __packed; + +struct fc_std_abts_ba_acc { + struct fc_frame_header fchdr; + struct fc_ba_acc acc; +} __packed; + +struct fc_std_abts_ba_rjt { + struct fc_frame_header fchdr; + struct fc_ba_rjt rjt; +} __packed; + +struct fc_std_els_prli { + struct fc_frame_header fchdr; + struct fc_els_prli els_prli; + struct fc_els_spp sp; +} __packed; + +struct fc_std_rpn_id { + struct fc_frame_header fchdr; + struct fc_ct_hdr fc_std_ct_hdr; + struct fc_ns_rn_id rpn_id; +} __packed; + +struct fc_std_fdmi_rhba { + struct fc_frame_header fchdr; + struct fc_ct_hdr fc_std_ct_hdr; + struct fc_fdmi_rhba rhba; +} __packed; + +struct fc_std_fdmi_rpa { + struct fc_frame_header fchdr; + struct fc_ct_hdr fc_std_ct_hdr; + struct fc_fdmi_rpa rpa; +} __packed; + +struct fc_std_rft_id { + struct fc_frame_header fchdr; + struct fc_ct_hdr fc_std_ct_hdr; + struct fc_ns_rft_id rft_id; +} __packed; + +struct fc_std_rff_id { + struct fc_frame_header fchdr; + struct fc_ct_hdr fc_std_ct_hdr; + struct fc_ns_rff_id rff_id; +} __packed; + +struct fc_std_gpn_ft { + struct fc_frame_header fchdr; + struct fc_ct_hdr fc_std_ct_hdr; + struct fc_ns_gid_ft gpn_ft; +} __packed; + +/* Accept CT_IU for GPN_FT */ +struct fc_gpn_ft_rsp_iu { + uint8_t ctrl; + uint8_t fcid[3]; + uint32_t rsvd; + __be64 wwpn; +} __packed; + +struct fc_std_rls { + struct fc_frame_header fchdr; + struct fc_els_rls els; +} __packed; + +struct fc_std_scr { + struct fc_frame_header fchdr; + struct fc_els_scr scr; +} 
__packed; + +struct fc_std_rscn { + struct fc_frame_header fchdr; + struct fc_els_rscn els; +} __packed; + +struct fc_std_logo { + struct fc_frame_header fchdr; + struct fc_els_logo els; +} __packed; + +#define FNIC_ETH_FCOE_HDRS_OFFSET \ + (sizeof(struct ethhdr) + sizeof(struct fcoe_hdr)) + +#endif /* _FDLS_FC_H */ diff --git a/drivers/scsi/fnic/fip.c b/drivers/scsi/fnic/fip.c new file mode 100644 index 000000000000..7bb85949033f --- /dev/null +++ b/drivers/scsi/fnic/fip.c @@ -0,0 +1,1005 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + */ +#include "fnic.h" +#include "fip.h" +#include <linux/etherdevice.h> + +#define FIP_FNIC_RESET_WAIT_COUNT 15 + +/** + * fnic_fcoe_reset_vlans - Free up the list of discovered vlans + * @fnic: Handle to fnic driver instance + */ +void fnic_fcoe_reset_vlans(struct fnic *fnic) +{ + unsigned long flags; + struct fcoe_vlan *vlan, *next; + + spin_lock_irqsave(&fnic->vlans_lock, flags); + if (!list_empty(&fnic->vlan_list)) { + list_for_each_entry_safe(vlan, next, &fnic->vlan_list, list) { + list_del(&vlan->list); + kfree(vlan); + } + } + + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Reset vlan complete\n"); +} + +/** + * fnic_fcoe_send_vlan_req - Send FIP vlan request to all FCFs MAC + * @fnic: Handle to fnic driver instance + */ +void fnic_fcoe_send_vlan_req(struct fnic *fnic) +{ + uint8_t *frame; + struct fnic_iport_s *iport = &fnic->iport; + struct fnic_stats *fnic_stats = &fnic->fnic_stats; + u64 vlan_tov; + struct fip_vlan_req *pvlan_req; + uint16_t frame_size = sizeof(struct fip_vlan_req); + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FIP_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send VLAN req"); + return; + } + + fnic_fcoe_reset_vlans(fnic); + + fnic->set_vlan(fnic, 0); + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "set vlan done\n"); + + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "got MAC 0x%x:%x:%x:%x:%x:%x\n", iport->hwmac[0], + iport->hwmac[1], iport->hwmac[2], iport->hwmac[3], + iport->hwmac[4], iport->hwmac[5]); + + pvlan_req = (struct fip_vlan_req *) frame; + *pvlan_req = (struct fip_vlan_req) { + .eth = {.h_dest = FCOE_ALL_FCFS_MAC, + .h_proto = cpu_to_be16(ETH_P_FIP)}, + .fip = {.fip_ver = FIP_VER_ENCAPS(FIP_VER), + .fip_op = cpu_to_be16(FIP_OP_VLAN), + .fip_subcode = FIP_SC_REQ, + .fip_dl_len = cpu_to_be16(FIP_VLAN_REQ_LEN)}, + .mac_desc = {.fd_desc = {.fip_dtype = FIP_DT_MAC, + .fip_dlen = 2}} + }; + + memcpy(pvlan_req->eth.h_source, iport->hwmac, ETH_ALEN); + memcpy(pvlan_req->mac_desc.fd_mac, iport->hwmac, ETH_ALEN); + + atomic64_inc(&fnic_stats->vlan_stats.vlan_disc_reqs); + + iport->fip.state = FDLS_FIP_VLAN_DISCOVERY_STARTED; + + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Send VLAN req\n"); + fnic_send_fip_frame(iport, frame, frame_size); + + vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV); + mod_timer(&fnic->retry_fip_timer, round_jiffies(vlan_tov)); + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fip timer set\n"); +} + +/** + * fnic_fcoe_process_vlan_resp - Processes the vlan response from one FCF and + * populates VLAN list. + * @fnic: Handle to fnic driver instance + * @fiph: Received FIP frame + * + * Will wait for responses from multiple FCFs until timeout. 
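+ *
+ * Descriptor walk, in outline (per the FIP encapsulation this handler
+ * assumes): the payload after the FIP header is a chain of descriptors,
+ * each led by a one-byte type and a one-byte length counted in 4-byte
+ * words. A FIP_DT_VLAN descriptor (dlen 1) carries one VLAN ID; other
+ * types (e.g. a FIP_DT_MAC descriptor) are skipped by advancing
+ * fip_dlen words.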
+ */
+void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct fip_header *fiph)
+{
+	struct fip_vlan_notif *vlan_notif = (struct fip_vlan_notif *)fiph;
+
+	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
+	u16 vid;
+	int num_vlan = 0;
+	int cur_desc, desc_len;
+	struct fcoe_vlan *vlan;
+	struct fip_vlan_desc *vlan_desc;
+	unsigned long flags;
+
+	FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+		     "fnic 0x%p got vlan resp\n", fnic);
+
+	desc_len = be16_to_cpu(vlan_notif->fip.fip_dl_len);
+	FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+		     "desc_len %d\n", desc_len);
+
+	spin_lock_irqsave(&fnic->vlans_lock, flags);
+
+	cur_desc = 0;
+	while (desc_len > 0) {
+		vlan_desc =
+		    (struct fip_vlan_desc *)(((char *)vlan_notif->vlans_desc)
+					     + cur_desc * 4);
+
+		if (vlan_desc->fd_desc.fip_dtype == FIP_DT_VLAN) {
+			if (vlan_desc->fd_desc.fip_dlen != 1) {
+				FNIC_FIP_DBG(KERN_INFO, fnic->host,
+					     fnic->fnic_num,
+					     "Invalid descriptor length (%x) in VLAN response\n",
+					     vlan_desc->fd_desc.fip_dlen);
+
+			}
+			num_vlan++;
+			vid = be16_to_cpu(vlan_desc->fd_vlan);
+			FNIC_FIP_DBG(KERN_INFO, fnic->host,
+				     fnic->fnic_num,
+				     "process_vlan_resp: FIP VLAN %d\n", vid);
+			/* atomic: vlans_lock is held with IRQs disabled */
+			vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
+
+			if (!vlan) {
+				/* retry from timer */
+				FNIC_FIP_DBG(KERN_INFO, fnic->host,
+					     fnic->fnic_num,
+					     "Memory allocation failure\n");
+				spin_unlock_irqrestore(&fnic->vlans_lock,
+						       flags);
+				goto out;
+			}
+			vlan->vid = vid & 0x0fff;
+			vlan->state = FIP_VLAN_AVAIL;
+			list_add_tail(&vlan->list, &fnic->vlan_list);
+			break;
+		}
+		FNIC_FIP_DBG(KERN_INFO, fnic->host,
+			     fnic->fnic_num,
+			     "Invalid descriptor type (%x) in VLAN response\n",
+			     vlan_desc->fd_desc.fip_dtype);
+		/*
+		 * Note: received a type=2 descriptor here, i.e. a FIP
+		 * MAC Address Descriptor
+		 */
+		cur_desc += vlan_desc->fd_desc.fip_dlen;
+		desc_len -= vlan_desc->fd_desc.fip_dlen;
+	}
+
+	/* any VLAN descriptors present ?
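+	 * If none was present, only the "no VLAN ID" statistic is bumped
+	 * below; the retry is driven from the FIP timer path
+	 * (fnic_vlan_discovery_timeout), not from this handler.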
+	 */
+	if (num_vlan == 0) {
+		atomic64_inc(&fnic_stats->vlan_stats.resp_withno_vlanID);
+		FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+			     "fnic 0x%p No VLAN descriptors in FIP VLAN response\n",
+			     fnic);
+	}
+
+	spin_unlock_irqrestore(&fnic->vlans_lock, flags);
+
+out:
+	return;
+}
+
+/**
+ * fnic_fcoe_start_fcf_discovery - Start FIP FCF discovery on the selected VLAN
+ * @fnic: Handle to fnic driver instance
+ */
+void fnic_fcoe_start_fcf_discovery(struct fnic *fnic)
+{
+	uint8_t *frame;
+	struct fnic_iport_s *iport = &fnic->iport;
+	u64 fcs_tov;
+	struct fip_discovery *pdisc_sol;
+	uint16_t frame_size = sizeof(struct fip_discovery);
+
+	frame = fdls_alloc_frame(iport);
+	if (frame == NULL) {
+		FNIC_FIP_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+			     "Failed to allocate frame to start FCF discovery");
+		return;
+	}
+
+	memset(iport->selected_fcf.fcf_mac, 0, ETH_ALEN);
+
+	pdisc_sol = (struct fip_discovery *) frame;
+	*pdisc_sol = (struct fip_discovery) {
+		.eth = {.h_dest = FCOE_ALL_FCFS_MAC,
+			.h_proto = cpu_to_be16(ETH_P_FIP)},
+		.fip = {
+			.fip_ver = FIP_VER_ENCAPS(FIP_VER), .fip_op = cpu_to_be16(FIP_OP_DISC),
+			.fip_subcode = FIP_SC_REQ, .fip_dl_len = cpu_to_be16(FIP_DISC_SOL_LEN),
+			.fip_flags = cpu_to_be16(FIP_FL_FPMA)},
+		.mac_desc = {.fd_desc = {.fip_dtype = FIP_DT_MAC, .fip_dlen = 2}},
+		.name_desc = {.fd_desc = {.fip_dtype = FIP_DT_NAME, .fip_dlen = 3}},
+		.fcoe_desc = {.fd_desc = {.fip_dtype = FIP_DT_FCOE_SIZE, .fip_dlen = 1},
+			      .fd_size = cpu_to_be16(FCOE_MAX_SIZE)}
+	};
+
+	memcpy(pdisc_sol->eth.h_source, iport->hwmac, ETH_ALEN);
+	memcpy(pdisc_sol->mac_desc.fd_mac, iport->hwmac, ETH_ALEN);
+	iport->selected_fcf.fcf_priority = 0xFF;
+
+	FNIC_STD_SET_NODE_NAME(&pdisc_sol->name_desc.fd_wwn, iport->wwnn);
+
+	FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+		     "Start FCF discovery\n");
+	fnic_send_fip_frame(iport, frame, frame_size);
+
+	iport->fip.state = FDLS_FIP_FCF_DISCOVERY_STARTED;
+
+	fcs_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FCS_TOV);
+	mod_timer(&fnic->retry_fip_timer, round_jiffies(fcs_tov));
+}
+
+/**
+ * fnic_fcoe_fip_discovery_resp - Processes FCF advertisements.
+ * @fnic: Handle to fnic driver instance
+ * @fiph: Received frame
+ *
+ * FCF advertisements can be:
+ * solicited - Sent in response to a FIP discovery solicitation from
+ *     this port. Store the information of the FCF with the highest
+ *     priority. Wait until timeout in case of multiple FCFs.
+ *
+ * unsolicited - Sent periodically by the FCF for keep alive.
+ *     If FLOGI is in progress or complete and the advertisement was
+ *     received from our selected FCF, refresh the keep alive timer.
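+ *
+ * The selection rule applied below, in outline (a lower fd_pri value
+ * means a higher priority, and only FCFs advertising FIP_FL_AVAIL are
+ * eligible):
+ *
+ *	if (disc_adv->prio_desc.fd_pri < iport->selected_fcf.fcf_priority &&
+ *	    (be16_to_cpu(disc_adv->fip.fip_flags) & FIP_FL_AVAIL))
+ *		remember this FCF's MAC, priority and FKA period;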
+ */ +void fnic_fcoe_fip_discovery_resp(struct fnic *fnic, struct fip_header *fiph) +{ + struct fnic_iport_s *iport = &fnic->iport; + struct fip_disc_adv *disc_adv = (struct fip_disc_adv *)fiph; + u64 fcs_ka_tov; + u64 tov; + int fka_has_changed; + + switch (iport->fip.state) { + case FDLS_FIP_FCF_DISCOVERY_STARTED: + if (be16_to_cpu(disc_adv->fip.fip_flags) & FIP_FL_SOL) { + FNIC_FIP_DBG(KERN_INFO, fnic->host, + fnic->fnic_num, + "fnic 0x%p Solicited adv\n", fnic); + + if ((disc_adv->prio_desc.fd_pri < + iport->selected_fcf.fcf_priority) + && (be16_to_cpu(disc_adv->fip.fip_flags) & FIP_FL_AVAIL)) { + + FNIC_FIP_DBG(KERN_INFO, fnic->host, + fnic->fnic_num, + "fnic 0x%p FCF Available\n", fnic); + memcpy(iport->selected_fcf.fcf_mac, + disc_adv->mac_desc.fd_mac, ETH_ALEN); + iport->selected_fcf.fcf_priority = + disc_adv->prio_desc.fd_pri; + iport->selected_fcf.fka_adv_period = + be32_to_cpu(disc_adv->fka_adv_desc.fd_fka_period); + FNIC_FIP_DBG(KERN_INFO, fnic->host, + fnic->fnic_num, "adv time %d", + iport->selected_fcf.fka_adv_period); + iport->selected_fcf.ka_disabled = + (disc_adv->fka_adv_desc.fd_flags & 1); + } + } + break; + case FDLS_FIP_FLOGI_STARTED: + case FDLS_FIP_FLOGI_COMPLETE: + if (!(be16_to_cpu(disc_adv->fip.fip_flags) & FIP_FL_SOL)) { + /* same fcf */ + if (memcmp + (iport->selected_fcf.fcf_mac, + disc_adv->mac_desc.fd_mac, ETH_ALEN) == 0) { + if (iport->selected_fcf.fka_adv_period != + be32_to_cpu(disc_adv->fka_adv_desc.fd_fka_period)) { + iport->selected_fcf.fka_adv_period = + be32_to_cpu(disc_adv->fka_adv_desc.fd_fka_period); + FNIC_FIP_DBG(KERN_INFO, + fnic->host, + fnic->fnic_num, + "change fka to %d", + iport->selected_fcf.fka_adv_period); + } + + fka_has_changed = + (iport->selected_fcf.ka_disabled == 1) + && ((disc_adv->fka_adv_desc.fd_flags & 1) == + 0); + + iport->selected_fcf.ka_disabled = + (disc_adv->fka_adv_desc.fd_flags & 1); + if (!((iport->selected_fcf.ka_disabled) + || (iport->selected_fcf.fka_adv_period == + 0))) { + + fcs_ka_tov = jiffies + + 3 + * + msecs_to_jiffies(iport->selected_fcf.fka_adv_period); + mod_timer(&fnic->fcs_ka_timer, + round_jiffies(fcs_ka_tov)); + } else { + if (timer_pending(&fnic->fcs_ka_timer)) + del_timer_sync(&fnic->fcs_ka_timer); + } + + if (fka_has_changed) { + if (iport->selected_fcf.fka_adv_period != 0) { + tov = + jiffies + + msecs_to_jiffies( + iport->selected_fcf.fka_adv_period); + mod_timer(&fnic->enode_ka_timer, + round_jiffies(tov)); + + tov = + jiffies + + msecs_to_jiffies + (FIP_VN_KA_PERIOD); + mod_timer(&fnic->vn_ka_timer, + round_jiffies(tov)); + } + } + } + } + break; + default: + break; + } /* end switch */ +} + +/** + * fnic_fcoe_start_flogi - Send FIP FLOGI to the selected FCF + * @fnic: Handle to fnic driver instance + */ +void fnic_fcoe_start_flogi(struct fnic *fnic) +{ + uint8_t *frame; + struct fnic_iport_s *iport = &fnic->iport; + struct fip_flogi *pflogi_req; + u64 flogi_tov; + uint16_t oxid; + uint16_t frame_size = sizeof(struct fip_flogi); + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FIP_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to start FIP FLOGI"); + return; + } + + pflogi_req = (struct fip_flogi *) frame; + *pflogi_req = (struct fip_flogi) { + .eth = { + .h_proto = cpu_to_be16(ETH_P_FIP)}, + .fip = { + .fip_ver = FIP_VER_ENCAPS(FIP_VER), + .fip_op = cpu_to_be16(FIP_OP_LS), + .fip_subcode = FIP_SC_REQ, + .fip_dl_len = cpu_to_be16(FIP_FLOGI_LEN), + .fip_flags = cpu_to_be16(FIP_FL_FPMA)}, + .flogi_desc = { + .fd_desc = {.fip_dtype = FIP_DT_FLOGI, .fip_dlen 
= 36}, + .flogi = { + .fchdr = { + .fh_r_ctl = FC_RCTL_ELS_REQ, + .fh_d_id = {0xFF, 0xFF, 0xFE}, + .fh_type = FC_TYPE_ELS, + .fh_f_ctl = {FNIC_ELS_REQ_FCTL, 0, 0}, + .fh_rx_id = cpu_to_be16(FNIC_UNASSIGNED_RXID)}, + .els = { + .fl_cmd = ELS_FLOGI, + .fl_csp = { + .sp_hi_ver = + FNIC_FC_PH_VER_HI, + .sp_lo_ver = + FNIC_FC_PH_VER_LO, + .sp_bb_cred = + cpu_to_be16 + (FNIC_FC_B2B_CREDIT), + .sp_bb_data = + cpu_to_be16 + (FNIC_FC_B2B_RDF_SZ)}, + .fl_cssp[2].cp_class = + cpu_to_be16(FC_CPC_VALID | FC_CPC_SEQ) + }, + } + }, + .mac_desc = {.fd_desc = {.fip_dtype = FIP_DT_MAC, .fip_dlen = 2}} + }; + + memcpy(pflogi_req->eth.h_source, iport->hwmac, ETH_ALEN); + if (iport->usefip) + memcpy(pflogi_req->eth.h_dest, iport->selected_fcf.fcf_mac, + ETH_ALEN); + + oxid = fdls_alloc_oxid(iport, FNIC_FRAME_TYPE_FABRIC_FLOGI, + &iport->active_oxid_fabric_req); + if (oxid == FNIC_UNASSIGNED_OXID) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Failed to allocate OXID to send FIP FLOGI"); + mempool_free(frame, fnic->frame_pool); + return; + } + FNIC_STD_SET_OX_ID(pflogi_req->flogi_desc.flogi.fchdr, oxid); + + FNIC_STD_SET_NPORT_NAME(&pflogi_req->flogi_desc.flogi.els.fl_wwpn, + iport->wwpn); + FNIC_STD_SET_NODE_NAME(&pflogi_req->flogi_desc.flogi.els.fl_wwnn, + iport->wwnn); + + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FIP start FLOGI\n"); + fnic_send_fip_frame(iport, frame, frame_size); + iport->fip.flogi_retry++; + + iport->fip.state = FDLS_FIP_FLOGI_STARTED; + flogi_tov = jiffies + msecs_to_jiffies(fnic->config.flogi_timeout); + mod_timer(&fnic->retry_fip_timer, round_jiffies(flogi_tov)); +} + +/** + * fnic_fcoe_process_flogi_resp - Processes FLOGI response from FCF. + * @fnic: Handle to fnic driver instance + * @fiph: Received frame + * + * If successful save assigned fc_id and MAC, program firmware + * and start fdls discovery, else restart vlan discovery. + */ +void fnic_fcoe_process_flogi_resp(struct fnic *fnic, struct fip_header *fiph) +{ + struct fnic_iport_s *iport = &fnic->iport; + struct fip_flogi_rsp *flogi_rsp = (struct fip_flogi_rsp *)fiph; + int desc_len; + uint32_t s_id; + int frame_type; + uint16_t oxid; + + struct fnic_stats *fnic_stats = &fnic->fnic_stats; + struct fc_frame_header *fchdr = &flogi_rsp->rsp_desc.flogi.fchdr; + + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fnic 0x%p FIP FLOGI rsp\n", fnic); + desc_len = be16_to_cpu(flogi_rsp->fip.fip_dl_len); + if (desc_len != 38) { + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Invalid Descriptor List len (%x). 
Dropping frame\n", + desc_len); + return; + } + + if (!((flogi_rsp->rsp_desc.fd_desc.fip_dtype == 7) + && (flogi_rsp->rsp_desc.fd_desc.fip_dlen == 36)) + || !((flogi_rsp->mac_desc.fd_desc.fip_dtype == 2) + && (flogi_rsp->mac_desc.fd_desc.fip_dlen == 2))) { + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Dropping frame invalid type and len mix\n"); + return; + } + + frame_type = fnic_fdls_validate_and_get_frame_type(iport, fchdr); + + s_id = ntoh24(fchdr->fh_s_id); + if ((fchdr->fh_f_ctl[0] != 0x98) + || (fchdr->fh_r_ctl != 0x23) + || (s_id != FC_FID_FLOGI) + || (frame_type != FNIC_FABRIC_FLOGI_RSP) + || (fchdr->fh_type != 0x01)) { + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Dropping invalid frame: s_id %x F %x R %x t %x OX_ID %x\n", + s_id, fchdr->fh_f_ctl[0], fchdr->fh_r_ctl, + fchdr->fh_type, FNIC_STD_GET_OX_ID(fchdr)); + return; + } + + if (iport->fip.state == FDLS_FIP_FLOGI_STARTED) { + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fnic 0x%p rsp for pending FLOGI\n", fnic); + + oxid = FNIC_STD_GET_OX_ID(fchdr); + fdls_free_oxid(iport, oxid, &iport->active_oxid_fabric_req); + del_timer_sync(&fnic->retry_fip_timer); + + if ((be16_to_cpu(flogi_rsp->fip.fip_dl_len) == FIP_FLOGI_LEN) + && (flogi_rsp->rsp_desc.flogi.els.fl_cmd == ELS_LS_ACC)) { + + FNIC_FIP_DBG(KERN_INFO, fnic->host, + fnic->fnic_num, + "fnic 0x%p FLOGI success\n", fnic); + memcpy(iport->fpma, flogi_rsp->mac_desc.fd_mac, ETH_ALEN); + iport->fcid = + ntoh24(flogi_rsp->rsp_desc.flogi.fchdr.fh_d_id); + + iport->r_a_tov = + be32_to_cpu(flogi_rsp->rsp_desc.flogi.els.fl_csp.sp_r_a_tov); + iport->e_d_tov = + be32_to_cpu(flogi_rsp->rsp_desc.flogi.els.fl_csp.sp_e_d_tov); + memcpy(fnic->iport.fcfmac, iport->selected_fcf.fcf_mac, + ETH_ALEN); + vnic_dev_add_addr(fnic->vdev, flogi_rsp->mac_desc.fd_mac); + + if (fnic_fdls_register_portid(iport, iport->fcid, NULL) + != 0) { + FNIC_FIP_DBG(KERN_INFO, fnic->host, + fnic->fnic_num, + "fnic 0x%p flogi registration failed\n", + fnic); + return; + } + + iport->fip.state = FDLS_FIP_FLOGI_COMPLETE; + iport->state = FNIC_IPORT_STATE_FABRIC_DISC; + FNIC_FIP_DBG(KERN_INFO, fnic->host, + fnic->fnic_num, "iport->state:%d\n", + iport->state); + fnic_fdls_disc_start(iport); + if (!((iport->selected_fcf.ka_disabled) + || (iport->selected_fcf.fka_adv_period == 0))) { + u64 tov; + + tov = jiffies + + + msecs_to_jiffies(iport->selected_fcf.fka_adv_period); + mod_timer(&fnic->enode_ka_timer, + round_jiffies(tov)); + + tov = + jiffies + + msecs_to_jiffies(FIP_VN_KA_PERIOD); + mod_timer(&fnic->vn_ka_timer, + round_jiffies(tov)); + + } + } else { + /* + * If there's FLOGI rejects - clear all + * fcf's & restart from scratch + */ + atomic64_inc(&fnic_stats->vlan_stats.flogi_rejects); + /* start FCoE VLAN discovery */ + fnic_fcoe_send_vlan_req(fnic); + + iport->fip.state = FDLS_FIP_VLAN_DISCOVERY_STARTED; + } + } +} + +/** + * fnic_common_fip_cleanup - Clean up FCF info and timers in case of + * link down/CVL + * @fnic: Handle to fnic driver instance + */ +void fnic_common_fip_cleanup(struct fnic *fnic) +{ + + struct fnic_iport_s *iport = &fnic->iport; + + if (!iport->usefip) + return; + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fnic 0x%p fip cleanup\n", fnic); + + iport->fip.state = FDLS_FIP_INIT; + + del_timer_sync(&fnic->retry_fip_timer); + del_timer_sync(&fnic->fcs_ka_timer); + del_timer_sync(&fnic->enode_ka_timer); + del_timer_sync(&fnic->vn_ka_timer); + + if (!is_zero_ether_addr(iport->fpma)) + vnic_dev_del_addr(fnic->vdev, iport->fpma); + + memset(iport->fpma, 0, 
ETH_ALEN);
+	iport->fcid = 0;
+	iport->r_a_tov = 0;
+	iport->e_d_tov = 0;
+	memset(fnic->iport.fcfmac, 0, ETH_ALEN);
+	memset(iport->selected_fcf.fcf_mac, 0, ETH_ALEN);
+	iport->selected_fcf.fcf_priority = 0;
+	iport->selected_fcf.fka_adv_period = 0;
+	iport->selected_fcf.ka_disabled = 0;
+
+	fnic_fcoe_reset_vlans(fnic);
+}
+
+/**
+ * fnic_fcoe_process_cvl - Processes Clear Virtual Link from FCF.
+ * @fnic: Handle to fnic driver instance
+ * @fiph: Received frame
+ *
+ * Verify that the CVL was received from our current FCF for our assigned
+ * MAC, then clean up and restart VLAN discovery.
+ */
+void fnic_fcoe_process_cvl(struct fnic *fnic, struct fip_header *fiph)
+{
+	struct fnic_iport_s *iport = &fnic->iport;
+	struct fip_cvl *cvl_msg = (struct fip_cvl *)fiph;
+	int i;
+	bool found = false;
+	int max_count = 0;
+
+	FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+		     "fnic 0x%p clear virtual link handler\n", fnic);
+
+	if (!((cvl_msg->fcf_mac_desc.fd_desc.fip_dtype == 2)
+	      && (cvl_msg->fcf_mac_desc.fd_desc.fip_dlen == 2))
+	    || !((cvl_msg->name_desc.fd_desc.fip_dtype == 4)
+		 && (cvl_msg->name_desc.fd_desc.fip_dlen == 3))) {
+
+		FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+			     "invalid mix: ft %x fl %x ndt %x ndl %x",
+			     cvl_msg->fcf_mac_desc.fd_desc.fip_dtype,
+			     cvl_msg->fcf_mac_desc.fd_desc.fip_dlen,
+			     cvl_msg->name_desc.fd_desc.fip_dtype,
+			     cvl_msg->name_desc.fd_desc.fip_dlen);
+	}
+
+	if (memcmp
+	    (iport->selected_fcf.fcf_mac, cvl_msg->fcf_mac_desc.fd_mac, ETH_ALEN)
+	    == 0) {
+		for (i = 0; i < ((be16_to_cpu(fiph->fip_dl_len) / 5) - 1); i++) {
+			if (!((cvl_msg->vn_ports_desc[i].fd_desc.fip_dtype == 11)
+			      && (cvl_msg->vn_ports_desc[i].fd_desc.fip_dlen == 5))) {
+
+				FNIC_FIP_DBG(KERN_INFO, fnic->host,
+					     fnic->fnic_num,
+					     "Invalid type and len mix type: %d len: %d\n",
+					     cvl_msg->vn_ports_desc[i].fd_desc.fip_dtype,
+					     cvl_msg->vn_ports_desc[i].fd_desc.fip_dlen);
+			}
+			if (memcmp
+			    (iport->fpma, cvl_msg->vn_ports_desc[i].fd_mac,
+			     ETH_ALEN) == 0) {
+				found = true;
+				break;
+			}
+		}
+		if (!found)
+			return;
+		fnic_common_fip_cleanup(fnic);
+
+		while (fnic->reset_in_progress == IN_PROGRESS) {
+			spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags);
+			wait_for_completion_timeout(&fnic->reset_completion_wait,
+						    msecs_to_jiffies(5000));
+			spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags);
+			max_count++;
+			if (max_count >= FIP_FNIC_RESET_WAIT_COUNT) {
+				FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+					     "Reset thread waited too long. Skipping handle link event %p\n",
+					     fnic);
+				return;
+			}
+			FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+				     "fnic reset in progress.
Link event needs to wait %p", + fnic); + } + fnic->reset_in_progress = IN_PROGRESS; + fnic_fdls_link_down(iport); + fnic->reset_in_progress = NOT_IN_PROGRESS; + complete(&fnic->reset_completion_wait); + fnic_fcoe_send_vlan_req(fnic); + } +} + +/** + * fdls_fip_recv_frame - Demultiplexer for FIP frames + * @fnic: Handle to fnic driver instance + * @frame: Received ethernet frame + */ +int fdls_fip_recv_frame(struct fnic *fnic, void *frame) +{ + struct ethhdr *eth = (struct ethhdr *)frame; + struct fip_header *fiph; + u16 op; + u8 sub; + int len = 2048; + + if (be16_to_cpu(eth->h_proto) == ETH_P_FIP) { + fiph = (struct fip_header *)(eth + 1); + op = be16_to_cpu(fiph->fip_op); + sub = fiph->fip_subcode; + + fnic_debug_dump_fip_frame(fnic, eth, len, "Incoming"); + + if (op == FIP_OP_DISC && sub == FIP_SC_REP) + fnic_fcoe_fip_discovery_resp(fnic, fiph); + else if (op == FIP_OP_VLAN && sub == FIP_SC_REP) + fnic_fcoe_process_vlan_resp(fnic, fiph); + else if (op == FIP_OP_CTRL && sub == FIP_SC_REP) + fnic_fcoe_process_cvl(fnic, fiph); + else if (op == FIP_OP_LS && sub == FIP_SC_REP) + fnic_fcoe_process_flogi_resp(fnic, fiph); + + /* Return true if the frame was a FIP frame */ + return true; + } + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Not a FIP Frame"); + return false; +} + +void fnic_work_on_fip_timer(struct work_struct *work) +{ + struct fnic *fnic = container_of(work, struct fnic, fip_timer_work); + struct fnic_iport_s *iport = &fnic->iport; + + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FIP timeout\n"); + + if (iport->fip.state == FDLS_FIP_VLAN_DISCOVERY_STARTED) { + fnic_vlan_discovery_timeout(fnic); + } else if (iport->fip.state == FDLS_FIP_FCF_DISCOVERY_STARTED) { + u8 zmac[ETH_ALEN] = { 0, 0, 0, 0, 0, 0 }; + + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FCF Discovery timeout\n"); + if (memcmp(iport->selected_fcf.fcf_mac, zmac, ETH_ALEN) != 0) { + + if (iport->flags & FNIC_FIRST_LINK_UP) { + fnic_scsi_fcpio_reset(iport->fnic); + iport->flags &= ~FNIC_FIRST_LINK_UP; + } + + fnic_fcoe_start_flogi(fnic); + if (!((iport->selected_fcf.ka_disabled) + || (iport->selected_fcf.fka_adv_period == 0))) { + u64 fcf_tov; + + fcf_tov = jiffies + + 3 + * + msecs_to_jiffies(iport->selected_fcf.fka_adv_period); + mod_timer(&fnic->fcs_ka_timer, + round_jiffies(fcf_tov)); + } + } else { + FNIC_FIP_DBG(KERN_INFO, fnic->host, + fnic->fnic_num, "FCF Discovery timeout\n"); + fnic_vlan_discovery_timeout(fnic); + } + } else if (iport->fip.state == FDLS_FIP_FLOGI_STARTED) { + fdls_schedule_oxid_free(iport, &iport->active_oxid_fabric_req); + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FLOGI timeout\n"); + if (iport->fip.flogi_retry < fnic->config.flogi_retries) + fnic_fcoe_start_flogi(fnic); + else + fnic_vlan_discovery_timeout(fnic); + } +} + +/** + * fnic_handle_fip_timer - Timeout handler for FIP discover phase. + * @t: Handle to the timer list + * + * Based on the current state, start next phase or restart discovery. + */ +void fnic_handle_fip_timer(struct timer_list *t) +{ + struct fnic *fnic = from_timer(fnic, t, retry_fip_timer); + + INIT_WORK(&fnic->fip_timer_work, fnic_work_on_fip_timer); + queue_work(fnic_fip_queue, &fnic->fip_timer_work); +} + +/** + * fnic_handle_enode_ka_timer - FIP node keep alive. 
+ * @t: Handle to the timer list + */ +void fnic_handle_enode_ka_timer(struct timer_list *t) +{ + uint8_t *frame; + struct fnic *fnic = from_timer(fnic, t, enode_ka_timer); + + struct fnic_iport_s *iport = &fnic->iport; + struct fip_enode_ka *penode_ka; + u64 enode_ka_tov; + uint16_t frame_size = sizeof(struct fip_enode_ka); + + if (iport->fip.state != FDLS_FIP_FLOGI_COMPLETE) + return; + + if ((iport->selected_fcf.ka_disabled) + || (iport->selected_fcf.fka_adv_period == 0)) { + return; + } + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FIP_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send enode ka"); + return; + } + + penode_ka = (struct fip_enode_ka *) frame; + *penode_ka = (struct fip_enode_ka) { + .eth = { + .h_proto = cpu_to_be16(ETH_P_FIP)}, + .fip = { + .fip_ver = FIP_VER_ENCAPS(FIP_VER), + .fip_op = cpu_to_be16(FIP_OP_CTRL), + .fip_subcode = FIP_SC_REQ, + .fip_dl_len = cpu_to_be16(FIP_ENODE_KA_LEN)}, + .mac_desc = {.fd_desc = {.fip_dtype = FIP_DT_MAC, .fip_dlen = 2}} + }; + + memcpy(penode_ka->eth.h_source, iport->hwmac, ETH_ALEN); + memcpy(penode_ka->eth.h_dest, iport->selected_fcf.fcf_mac, ETH_ALEN); + memcpy(penode_ka->mac_desc.fd_mac, iport->hwmac, ETH_ALEN); + + FNIC_FIP_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "Handle enode KA timer\n"); + fnic_send_fip_frame(iport, frame, frame_size); + enode_ka_tov = jiffies + + msecs_to_jiffies(iport->selected_fcf.fka_adv_period); + mod_timer(&fnic->enode_ka_timer, round_jiffies(enode_ka_tov)); +} + +/** + * fnic_handle_vn_ka_timer - FIP virtual port keep alive. + * @t: Handle to the timer list + */ +void fnic_handle_vn_ka_timer(struct timer_list *t) +{ + uint8_t *frame; + struct fnic *fnic = from_timer(fnic, t, vn_ka_timer); + + struct fnic_iport_s *iport = &fnic->iport; + struct fip_vn_port_ka *pvn_port_ka; + u64 vn_ka_tov; + uint8_t fcid[3]; + uint16_t frame_size = sizeof(struct fip_vn_port_ka); + + if (iport->fip.state != FDLS_FIP_FLOGI_COMPLETE) + return; + + if ((iport->selected_fcf.ka_disabled) + || (iport->selected_fcf.fka_adv_period == 0)) { + return; + } + + frame = fdls_alloc_frame(iport); + if (frame == NULL) { + FNIC_FIP_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Failed to allocate frame to send vn ka"); + return; + } + + pvn_port_ka = (struct fip_vn_port_ka *) frame; + *pvn_port_ka = (struct fip_vn_port_ka) { + .eth = { + .h_proto = cpu_to_be16(ETH_P_FIP)}, + .fip = { + .fip_ver = FIP_VER_ENCAPS(FIP_VER), + .fip_op = cpu_to_be16(FIP_OP_CTRL), + .fip_subcode = FIP_SC_REQ, + .fip_dl_len = cpu_to_be16(FIP_VN_KA_LEN)}, + .mac_desc = {.fd_desc = {.fip_dtype = FIP_DT_MAC, .fip_dlen = 2}}, + .vn_port_desc = {.fd_desc = {.fip_dtype = FIP_DT_VN_ID, .fip_dlen = 5}} + }; + + memcpy(pvn_port_ka->eth.h_source, iport->fpma, ETH_ALEN); + memcpy(pvn_port_ka->eth.h_dest, iport->selected_fcf.fcf_mac, ETH_ALEN); + memcpy(pvn_port_ka->mac_desc.fd_mac, iport->hwmac, ETH_ALEN); + memcpy(pvn_port_ka->vn_port_desc.fd_mac, iport->fpma, ETH_ALEN); + hton24(fcid, iport->fcid); + memcpy(pvn_port_ka->vn_port_desc.fd_fc_id, fcid, 3); + FNIC_STD_SET_NPORT_NAME(&pvn_port_ka->vn_port_desc.fd_wwpn, iport->wwpn); + + FNIC_FIP_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "Handle vnport KA timer\n"); + fnic_send_fip_frame(iport, frame, frame_size); + vn_ka_tov = jiffies + msecs_to_jiffies(FIP_VN_KA_PERIOD); + mod_timer(&fnic->vn_ka_timer, round_jiffies(vn_ka_tov)); +} + +/** + * fnic_vlan_discovery_timeout - Handle vlan discovery timeout + * @fnic: Handle to fnic driver instance + * + * End of VLAN 
discovery or FCF discovery time window. + * Start the FCF discovery if VLAN was never used. + */ +void fnic_vlan_discovery_timeout(struct fnic *fnic) +{ + struct fcoe_vlan *vlan; + struct fnic_iport_s *iport = &fnic->iport; + struct fnic_stats *fnic_stats = &fnic->fnic_stats; + unsigned long flags; + + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (fnic->stop_rx_link_events) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + if (!iport->usefip) + return; + + spin_lock_irqsave(&fnic->vlans_lock, flags); + if (list_empty(&fnic->vlan_list)) { + /* no vlans available, try again */ + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + fnic_fcoe_send_vlan_req(fnic); + return; + } + + vlan = list_first_entry(&fnic->vlan_list, struct fcoe_vlan, list); + + if (vlan->state == FIP_VLAN_SENT) { + if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) { + /* + * no response on this vlan, remove from the list. + * Try the next vlan + */ + list_del(&vlan->list); + kfree(vlan); + vlan = NULL; + if (list_empty(&fnic->vlan_list)) { + /* we exhausted all vlans, restart vlan disc */ + spin_unlock_irqrestore(&fnic->vlans_lock, + flags); + fnic_fcoe_send_vlan_req(fnic); + return; + } + /* check the next vlan */ + vlan = + list_first_entry(&fnic->vlan_list, struct fcoe_vlan, + list); + + fnic->set_vlan(fnic, vlan->vid); + vlan->state = FIP_VLAN_SENT; /* sent now */ + + } + atomic64_inc(&fnic_stats->vlan_stats.sol_expiry_count); + + } else { + fnic->set_vlan(fnic, vlan->vid); + vlan->state = FIP_VLAN_SENT; /* sent now */ + } + vlan->sol_count++; + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + fnic_fcoe_start_fcf_discovery(fnic); +} + +/** + * fnic_work_on_fcs_ka_timer - Handle work on FCS keep alive timer. + * @work: the work queue to be serviced + * + * Finish handling fcs_ka_timer in process context. + * Clean up, bring the link down, and restart all FIP discovery. + */ +void fnic_work_on_fcs_ka_timer(struct work_struct *work) +{ + struct fnic + *fnic = container_of(work, struct fnic, fip_timer_work); + struct fnic_iport_s *iport = &fnic->iport; + + FNIC_FIP_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fnic 0x%p fcs ka timeout\n", fnic); + + fnic_common_fip_cleanup(fnic); + spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); + fnic_fdls_link_down(iport); + iport->state = FNIC_IPORT_STATE_FIP; + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + + fnic_fcoe_send_vlan_req(fnic); +} + +/** + * fnic_handle_fcs_ka_timer - Handle FCS keep alive timer. + * @t: Handle to the timer list + * + * No keep alives received from FCF. Clean up, bring the link down + * and restart all the FIP discovery. + */ +void fnic_handle_fcs_ka_timer(struct timer_list *t) +{ + struct fnic *fnic = from_timer(fnic, t, fcs_ka_timer); + + INIT_WORK(&fnic->fip_timer_work, fnic_work_on_fcs_ka_timer); + queue_work(fnic_fip_queue, &fnic->fip_timer_work); +} diff --git a/drivers/scsi/fnic/fip.h b/drivers/scsi/fnic/fip.h new file mode 100644 index 000000000000..79fee7628870 --- /dev/null +++ b/drivers/scsi/fnic/fip.h @@ -0,0 +1,159 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. 
+ */ +#ifndef _FIP_H_ +#define _FIP_H_ + +#include "fdls_fc.h" +#include "fnic_fdls.h" +#include <scsi/fc/fc_fip.h> + +/* Drop the cast from the standard definition */ +#define FCOE_ALL_FCFS_MAC {0x01, 0x10, 0x18, 0x01, 0x00, 0x02} +#define FCOE_MAX_SIZE 0x082E + +#define FCOE_CTLR_FIPVLAN_TOV (3*1000) +#define FCOE_CTLR_FCS_TOV (3*1000) +#define FCOE_CTLR_MAX_SOL (5*1000) + +#define FIP_DISC_SOL_LEN (6) +#define FIP_VLAN_REQ_LEN (2) +#define FIP_ENODE_KA_LEN (2) +#define FIP_VN_KA_LEN (7) +#define FIP_FLOGI_LEN (38) + +enum fdls_vlan_state { + FIP_VLAN_AVAIL, + FIP_VLAN_SENT +}; + +enum fdls_fip_state { + FDLS_FIP_INIT, + FDLS_FIP_VLAN_DISCOVERY_STARTED, + FDLS_FIP_FCF_DISCOVERY_STARTED, + FDLS_FIP_FLOGI_STARTED, + FDLS_FIP_FLOGI_COMPLETE, +}; + +/* + * VLAN entry. + */ +struct fcoe_vlan { + struct list_head list; + uint16_t vid; /* vlan ID */ + uint16_t sol_count; /* no. of sols sent */ + uint16_t state; /* state */ +}; + +struct fip_vlan_req { + struct ethhdr eth; + struct fip_header fip; + struct fip_mac_desc mac_desc; +} __packed; + +struct fip_vlan_notif { + struct fip_header fip; + struct fip_vlan_desc vlans_desc[]; +} __packed; + +struct fip_vn_port_ka { + struct ethhdr eth; + struct fip_header fip; + struct fip_mac_desc mac_desc; + struct fip_vn_desc vn_port_desc; +} __packed; + +struct fip_enode_ka { + struct ethhdr eth; + struct fip_header fip; + struct fip_mac_desc mac_desc; +} __packed; + +struct fip_cvl { + struct fip_header fip; + struct fip_mac_desc fcf_mac_desc; + struct fip_wwn_desc name_desc; + struct fip_vn_desc vn_ports_desc[]; +} __packed; + +struct fip_flogi_desc { + struct fip_desc fd_desc; + uint16_t rsvd; + struct fc_std_flogi flogi; +} __packed; + +struct fip_flogi_rsp_desc { + struct fip_desc fd_desc; + uint16_t rsvd; + struct fc_std_flogi flogi; +} __packed; + +struct fip_flogi { + struct ethhdr eth; + struct fip_header fip; + struct fip_flogi_desc flogi_desc; + struct fip_mac_desc mac_desc; +} __packed; + +struct fip_flogi_rsp { + struct fip_header fip; + struct fip_flogi_rsp_desc rsp_desc; + struct fip_mac_desc mac_desc; +} __packed; + +struct fip_discovery { + struct ethhdr eth; + struct fip_header fip; + struct fip_mac_desc mac_desc; + struct fip_wwn_desc name_desc; + struct fip_size_desc fcoe_desc; +} __packed; + +struct fip_disc_adv { + struct fip_header fip; + struct fip_pri_desc prio_desc; + struct fip_mac_desc mac_desc; + struct fip_wwn_desc name_desc; + struct fip_fab_desc fabric_desc; + struct fip_fka_desc fka_adv_desc; +} __packed; + +void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct fip_header *fiph); +void fnic_fcoe_fip_discovery_resp(struct fnic *fnic, struct fip_header *fiph); +void fnic_fcoe_process_flogi_resp(struct fnic *fnic, struct fip_header *fiph); +void fnic_work_on_fip_timer(struct work_struct *work); +void fnic_work_on_fcs_ka_timer(struct work_struct *work); +void fnic_fcoe_send_vlan_req(struct fnic *fnic); +void fnic_fcoe_start_fcf_discovery(struct fnic *fnic); +void fnic_fcoe_start_flogi(struct fnic *fnic); +void fnic_fcoe_process_cvl(struct fnic *fnic, struct fip_header *fiph); +void fnic_vlan_discovery_timeout(struct fnic *fnic); + +extern struct workqueue_struct *fnic_fip_queue; + +#ifdef FNIC_DEBUG +static inline void +fnic_debug_dump_fip_frame(struct fnic *fnic, struct ethhdr *eth, + int len, char *pfx) +{ + struct fip_header *fiph = (struct fip_header *)(eth + 1); + u16 op = be16_to_cpu(fiph->fip_op); + u8 sub = fiph->fip_subcode; + + FNIC_FCS_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "FIP %s packet contents: op: 
0x%x sub: 0x%x (len = %d)", + pfx, op, sub, len); + + fnic_debug_dump(fnic, (uint8_t *)eth, len); +} + +#else /* FNIC_DEBUG */ + +static inline void +fnic_debug_dump_fip_frame(struct fnic *fnic, struct ethhdr *eth, + int len, char *pfx) {} +#endif /* FNIC_DEBUG */ + +#endif /* _FIP_H_ */ diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h index ce73f08ee889..6c5f6046b1f5 100644 --- a/drivers/scsi/fnic/fnic.h +++ b/drivers/scsi/fnic/fnic.h @@ -10,8 +10,10 @@ #include <linux/netdevice.h> #include <linux/workqueue.h> #include <linux/bitops.h> -#include <scsi/libfc.h> -#include <scsi/libfcoe.h> +#include <scsi/scsi_cmnd.h> +#include <scsi/scsi_transport.h> +#include <scsi/scsi_transport_fc.h> +#include <scsi/fc_frame.h> #include "fnic_io.h" #include "fnic_res.h" #include "fnic_trace.h" @@ -24,13 +26,15 @@ #include "vnic_intr.h" #include "vnic_stats.h" #include "vnic_scsi.h" +#include "fnic_fdls.h" #define DRV_NAME "fnic" #define DRV_DESCRIPTION "Cisco FCoE HBA Driver" -#define DRV_VERSION "1.7.0.0" +#define DRV_VERSION "1.8.0.0" #define PFX DRV_NAME ": " #define DFX DRV_NAME "%d: " +#define FABRIC_LOGO_MAX_RETRY 3 #define DESC_CLEAN_LOW_WATERMARK 8 #define FNIC_UCSM_DFLT_THROTTLE_CNT_BLD 16 /* UCSM default throttle count */ #define FNIC_MIN_IO_REQ 256 /* Min IO throttle count */ @@ -38,6 +42,7 @@ #define FNIC_DFLT_IO_REQ 256 /* Default scsi_cmnd tag map entries */ #define FNIC_DFLT_QUEUE_DEPTH 256 #define FNIC_STATS_RATE_LIMIT 4 /* limit rate at which stats are pulled up */ +#define LUN0_DELAY_TIME 9 /* * Tag bits used for special requests. @@ -75,6 +80,77 @@ #define FNIC_DEV_RST_TERM_DONE BIT(20) #define FNIC_DEV_RST_ABTS_PENDING BIT(21) +#define FNIC_FW_RESET_TIMEOUT 60000 /* mSec */ +#define FNIC_FCOE_MAX_CMD_LEN 16 +/* Retry supported by rport (returned by PRLI service parameters) */ +#define FNIC_FC_RP_FLAGS_RETRY 0x1 + +/* Cisco vendor id */ +#define PCI_VENDOR_ID_CISCO 0x1137 +#define PCI_DEVICE_ID_CISCO_VIC_FC 0x0045 /* fc vnic */ + +/* sereno pcie switch */ +#define PCI_DEVICE_ID_CISCO_SERENO 0x004e +#define PCI_DEVICE_ID_CISCO_CRUZ 0x007a /* Cruz */ +#define PCI_DEVICE_ID_CISCO_BODEGA 0x0131 /* Bodega */ +#define PCI_DEVICE_ID_CISCO_BEVERLY 0x025f /* Beverly */ + +/* Sereno */ +#define PCI_SUBDEVICE_ID_CISCO_VASONA 0x004f /* vasona mezz */ +#define PCI_SUBDEVICE_ID_CISCO_COTATI 0x0084 /* cotati mlom */ +#define PCI_SUBDEVICE_ID_CISCO_LEXINGTON 0x0085 /* lexington pcie */ +#define PCI_SUBDEVICE_ID_CISCO_ICEHOUSE 0x00cd /* Icehouse */ +#define PCI_SUBDEVICE_ID_CISCO_KIRKWOODLAKE 0x00ce /* KirkwoodLake pcie */ +#define PCI_SUBDEVICE_ID_CISCO_SUSANVILLE 0x012e /* Susanville MLOM */ +#define PCI_SUBDEVICE_ID_CISCO_TORRANCE 0x0139 /* Torrance MLOM */ + +/* Cruz */ +#define PCI_SUBDEVICE_ID_CISCO_CALISTOGA 0x012c /* Calistoga MLOM */ +#define PCI_SUBDEVICE_ID_CISCO_MOUNTAINVIEW 0x0137 /* Cruz Mezz */ +/* Cruz MountTian SIOC */ +#define PCI_SUBDEVICE_ID_CISCO_MOUNTTIAN 0x014b +#define PCI_SUBDEVICE_ID_CISCO_CLEARLAKE 0x014d /* ClearLake pcie */ +/* Cruz MountTian2 SIOC */ +#define PCI_SUBDEVICE_ID_CISCO_MOUNTTIAN2 0x0157 +#define PCI_SUBDEVICE_ID_CISCO_CLAREMONT 0x015d /* Claremont MLOM */ + +/* Bodega */ +/* VIC 1457 PCIe mLOM */ +#define PCI_SUBDEVICE_ID_CISCO_BRADBURY 0x0218 +#define PCI_SUBDEVICE_ID_CISCO_BRENTWOOD 0x0217 /* VIC 1455 PCIe */ +/* VIC 1487 PCIe mLOM */ +#define PCI_SUBDEVICE_ID_CISCO_BURLINGAME 0x021a +#define PCI_SUBDEVICE_ID_CISCO_BAYSIDE 0x0219 /* VIC 1485 PCIe */ +/* VIC 1440 Mezz mLOM */ +#define PCI_SUBDEVICE_ID_CISCO_BAKERSFIELD 0x0215 +#define 
PCI_SUBDEVICE_ID_CISCO_BOONVILLE 0x0216 /* VIC 1480 Mezz */ +#define PCI_SUBDEVICE_ID_CISCO_BENICIA 0x024a /* VIC 1495 */ +#define PCI_SUBDEVICE_ID_CISCO_BEAUMONT 0x024b /* VIC 1497 */ +#define PCI_SUBDEVICE_ID_CISCO_BRISBANE 0x02af /* VIC 1467 */ +#define PCI_SUBDEVICE_ID_CISCO_BENTON 0x02b0 /* VIC 1477 */ +#define PCI_SUBDEVICE_ID_CISCO_TWIN_RIVER 0x02cf /* VIC 14425 */ +#define PCI_SUBDEVICE_ID_CISCO_TWIN_PEAK 0x02d0 /* VIC 14825 */ + +/* Beverly */ +#define PCI_SUBDEVICE_ID_CISCO_BERN 0x02de /* VIC 15420 */ +#define PCI_SUBDEVICE_ID_CISCO_STOCKHOLM 0x02dd /* VIC 15428 */ +#define PCI_SUBDEVICE_ID_CISCO_KRAKOW 0x02dc /* VIC 15411 */ +#define PCI_SUBDEVICE_ID_CISCO_LUCERNE 0x02db /* VIC 15231 */ +#define PCI_SUBDEVICE_ID_CISCO_TURKU 0x02e8 /* VIC 15238 */ +#define PCI_SUBDEVICE_ID_CISCO_TURKU_PLUS 0x02f3 /* VIC 15237 */ +#define PCI_SUBDEVICE_ID_CISCO_ZURICH 0x02df /* VIC 15230 */ +#define PCI_SUBDEVICE_ID_CISCO_RIGA 0x02e0 /* VIC 15427 */ +#define PCI_SUBDEVICE_ID_CISCO_GENEVA 0x02e1 /* VIC 15422 */ +#define PCI_SUBDEVICE_ID_CISCO_HELSINKI 0x02e4 /* VIC 15235 */ +#define PCI_SUBDEVICE_ID_CISCO_GOTHENBURG 0x02f2 /* VIC 15425 */ + +struct fnic_pcie_device { + u32 device; + u8 *desc; + u32 subsystem_device; + u8 *subsys_desc; +}; + /* * fnic private data per SCSI command. * These fields are locked by the hashed io_req_lock. @@ -127,8 +203,38 @@ static inline u64 fnic_flags_and_state(struct scsi_cmnd *cmd) #define fnic_clear_state_flags(fnicp, st_flags) \ __fnic_set_state_flags(fnicp, st_flags, 1) +enum reset_states { + NOT_IN_PROGRESS = 0, + IN_PROGRESS, + RESET_ERROR +}; + +enum rscn_type { + NOT_PC_RSCN = 0, + PC_RSCN +}; + +enum pc_rscn_handling_status { + PC_RSCN_HANDLING_NOT_IN_PROGRESS = 0, + PC_RSCN_HANDLING_IN_PROGRESS +}; + +enum pc_rscn_handling_feature { + PC_RSCN_HANDLING_FEATURE_OFF = 0, + PC_RSCN_HANDLING_FEATURE_ON +}; + +extern unsigned int fnic_fdmi_support; extern unsigned int fnic_log_level; extern unsigned int io_completions; +extern struct workqueue_struct *fnic_event_queue; + +extern unsigned int pc_rscn_handling_feature_flag; +extern spinlock_t reset_fnic_list_lock; +extern struct list_head reset_fnic_list; +extern struct workqueue_struct *reset_fnic_work_queue; +extern struct work_struct reset_fnic_work; + #define FNIC_MAIN_LOGGING 0x01 #define FNIC_FCS_LOGGING 0x02 @@ -155,6 +261,12 @@ do { \ "fnic<%d>: %s: %d: " fmt, fnic_num,\ __func__, __LINE__, ##args);) +#define FNIC_FIP_DBG(kern_level, host, fnic_num, fmt, args...) \ + FNIC_CHECK_LOGGING(FNIC_FCS_LOGGING, \ + shost_printk(kern_level, host, \ + "fnic<%d>: %s: %d: " fmt, fnic_num,\ + __func__, __LINE__, ##args);) + #define FNIC_SCSI_DBG(kern_level, host, fnic_num, fmt, args...) 
\ FNIC_CHECK_LOGGING(FNIC_SCSI_LOGGING, \ shost_printk(kern_level, host, \ @@ -213,12 +325,26 @@ enum fnic_state { struct mempool; +enum fnic_role_e { + FNIC_ROLE_FCP_INITIATOR = 0, +}; + enum fnic_evt { FNIC_EVT_START_VLAN_DISC = 1, FNIC_EVT_START_FCF_DISC = 2, FNIC_EVT_MAX, }; +struct fnic_frame_list { + /* + * Link to frame lists + */ + struct list_head links; + void *fp; + int frame_len; + int rx_ethhdr_stripped; +}; + struct fnic_event { struct list_head list; struct fnic *fnic; @@ -235,8 +361,9 @@ struct fnic_cpy_wq { /* Per-instance private data structure */ struct fnic { int fnic_num; - struct fc_lport *lport; - struct fcoe_ctlr ctlr; /* FIP FCoE controller structure */ + enum fnic_role_e role; + struct fnic_iport_s iport; + struct Scsi_Host *host; struct vnic_dev_bar bar0; struct fnic_msix_entry msix[FNIC_MSIX_INTR_MAX]; @@ -255,6 +382,7 @@ struct fnic { unsigned int wq_count; unsigned int cq_count; + struct completion reset_completion_wait; struct mutex sgreset_mutex; spinlock_t sgreset_lock; /* lock for sgreset */ struct scsi_cmnd *sgreset_sc; @@ -268,25 +396,27 @@ struct fnic { u32 vlan_hw_insert:1; /* let hw insert the tag */ u32 in_remove:1; /* fnic device in removal */ u32 stop_rx_link_events:1; /* stop proc. rx frames, link events */ - u32 link_events:1; /* set when we get any link event*/ - - struct completion *remove_wait; /* device remove thread blocks */ + struct completion *fw_reset_done; + u32 reset_in_progress; atomic_t in_flight; /* io counter */ bool internal_reset_inprogress; u32 _reserved; /* fill hole */ unsigned long state_flags; /* protected by host lock */ enum fnic_state state; spinlock_t fnic_lock; + unsigned long lock_flags; u16 vlan_id; /* VLAN tag including priority */ u8 data_src_addr[ETH_ALEN]; u64 fcp_input_bytes; /* internal statistic */ u64 fcp_output_bytes; /* internal statistic */ u32 link_down_cnt; + u32 soft_reset_count; int link_status; struct list_head list; + struct list_head links; struct pci_dev *pdev; struct vnic_fc_config config; struct vnic_dev *vdev; @@ -306,19 +436,29 @@ struct fnic { struct work_struct link_work; struct work_struct frame_work; struct work_struct flush_work; - struct sk_buff_head frame_queue; - struct sk_buff_head tx_queue; + struct list_head frame_queue; + struct list_head tx_queue; + mempool_t *frame_pool; + mempool_t *frame_elem_pool; + struct work_struct tport_work; + struct list_head tport_event_list; + + char subsys_desc[14]; + int subsys_desc_len; + int pc_rscn_handling_status; /*** FIP related data members -- start ***/ void (*set_vlan)(struct fnic *, u16 vlan); struct work_struct fip_frame_work; - struct sk_buff_head fip_frame_queue; + struct work_struct fip_timer_work; + struct list_head fip_frame_queue; struct timer_list fip_timer; - struct list_head vlans; spinlock_t vlans_lock; - - struct work_struct event_work; - struct list_head evlist; + struct timer_list retry_fip_timer; + struct timer_list fcs_ka_timer; + struct timer_list enode_ka_timer; + struct timer_list vn_ka_timer; + struct list_head vlan_list; /*** FIP related data members -- end ***/ /* copy work queue cache line section */ @@ -341,11 +481,6 @@ struct fnic { ____cacheline_aligned struct vnic_intr intr[FNIC_MSIX_INTR_MAX]; }; -static inline struct fnic *fnic_from_ctlr(struct fcoe_ctlr *fip) -{ - return container_of(fip, struct fnic, ctlr); -} - extern struct workqueue_struct *fnic_event_queue; extern struct workqueue_struct *fnic_fip_queue; extern const struct attribute_group *fnic_host_groups[]; @@ -356,29 +491,29 @@ int 
fnic_set_intr_mode_msix(struct fnic *fnic); void fnic_free_intr(struct fnic *fnic); int fnic_request_intr(struct fnic *fnic); -int fnic_send(struct fc_lport *, struct fc_frame *); void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf); void fnic_handle_frame(struct work_struct *work); +void fnic_tport_event_handler(struct work_struct *work); void fnic_handle_link(struct work_struct *work); void fnic_handle_event(struct work_struct *work); +void fdls_reclaim_oxid_handler(struct work_struct *work); +void fdls_schedule_oxid_free(struct fnic_iport_s *iport, uint16_t *active_oxid); +void fdls_schedule_oxid_free_retry_work(struct work_struct *work); int fnic_rq_cmpl_handler(struct fnic *fnic, int); int fnic_alloc_rq_frame(struct vnic_rq *rq); void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf); void fnic_flush_tx(struct work_struct *work); -void fnic_eth_send(struct fcoe_ctlr *, struct sk_buff *skb); -void fnic_set_port_id(struct fc_lport *, u32, struct fc_frame *); -void fnic_update_mac(struct fc_lport *, u8 *new); void fnic_update_mac_locked(struct fnic *, u8 *new); int fnic_queuecommand(struct Scsi_Host *, struct scsi_cmnd *); int fnic_abort_cmd(struct scsi_cmnd *); int fnic_device_reset(struct scsi_cmnd *); -int fnic_host_reset(struct scsi_cmnd *); -int fnic_reset(struct Scsi_Host *); -void fnic_scsi_cleanup(struct fc_lport *); -void fnic_scsi_abort_io(struct fc_lport *); -void fnic_empty_scsi_cleanup(struct fc_lport *); -void fnic_exch_mgr_reset(struct fc_lport *, u32, u32); +int fnic_eh_host_reset_handler(struct scsi_cmnd *sc); +int fnic_host_reset(struct Scsi_Host *shost); +void fnic_reset(struct Scsi_Host *shost); +int fnic_issue_fc_host_lip(struct Scsi_Host *shost); +void fnic_get_host_port_state(struct Scsi_Host *shost); +void fnic_scsi_fcpio_reset(struct fnic *fnic); int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do, unsigned int cq_index); int fnic_wq_cmpl_handler(struct fnic *fnic, int); int fnic_flogi_reg_handler(struct fnic *fnic, u32); @@ -390,14 +525,15 @@ const char *fnic_state_to_str(unsigned int state); void fnic_mq_map_queues_cpus(struct Scsi_Host *host); void fnic_log_q_error(struct fnic *fnic); void fnic_handle_link_event(struct fnic *fnic); - +int fnic_stats_debugfs_init(struct fnic *fnic); +void fnic_stats_debugfs_remove(struct fnic *fnic); int fnic_is_abts_pending(struct fnic *, struct scsi_cmnd *); void fnic_handle_fip_frame(struct work_struct *work); +void fnic_reset_work_handler(struct work_struct *work); void fnic_handle_fip_event(struct fnic *fnic); void fnic_fcoe_reset_vlans(struct fnic *fnic); -void fnic_fcoe_evlist_free(struct fnic *fnic); -extern void fnic_handle_fip_timer(struct fnic *fnic); +extern void fnic_handle_fip_timer(struct timer_list *t); static inline int fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags) @@ -406,4 +542,90 @@ fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags) } void __fnic_set_state_flags(struct fnic *, unsigned long, unsigned long); void fnic_dump_fchost_stats(struct Scsi_Host *, struct fc_host_statistics *); +void fnic_free_txq(struct list_head *head); +int fnic_get_desc_by_devid(struct pci_dev *pdev, char **desc, + char **subsys_desc); +void fnic_fdls_link_status_change(struct fnic *fnic, int linkup); +void fnic_delete_fcp_tports(struct fnic *fnic); +void fnic_flush_tport_event_list(struct fnic *fnic); +int fnic_count_ioreqs_wq(struct fnic *fnic, u32 hwq, u32 portid); +unsigned int fnic_count_ioreqs(struct fnic *fnic, u32 portid); +unsigned int 
fnic_count_all_ioreqs(struct fnic *fnic); +unsigned int fnic_count_lun_ioreqs_wq(struct fnic *fnic, u32 hwq, + struct scsi_device *device); +unsigned int fnic_count_lun_ioreqs(struct fnic *fnic, + struct scsi_device *device); +void fnic_scsi_unload(struct fnic *fnic); +void fnic_scsi_unload_cleanup(struct fnic *fnic); +int fnic_get_debug_info(struct stats_debug_info *info, + struct fnic *fnic); + +struct fnic_scsi_iter_data { + struct fnic *fnic; + void *data1; + void *data2; + bool (*fn)(struct fnic *fnic, struct scsi_cmnd *sc, + void *data1, void *data2); +}; + +static inline bool +fnic_io_iter_handler(struct scsi_cmnd *sc, void *iter_data) +{ + struct fnic_scsi_iter_data *iter = iter_data; + + return iter->fn(iter->fnic, sc, iter->data1, iter->data2); +} + +static inline void +fnic_scsi_io_iter(struct fnic *fnic, + bool (*fn)(struct fnic *fnic, struct scsi_cmnd *sc, + void *data1, void *data2), + void *data1, void *data2) +{ + struct fnic_scsi_iter_data iter_data = { + .fn = fn, + .fnic = fnic, + .data1 = data1, + .data2 = data2, + }; + scsi_host_busy_iter(fnic->host, fnic_io_iter_handler, &iter_data); +} + +#ifdef FNIC_DEBUG +static inline void +fnic_debug_dump(struct fnic *fnic, uint8_t *u8arr, int len) +{ + int i; + + for (i = 0; i < len; i = i+8) { + FNIC_FCS_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "%d: %02x %02x %02x %02x %02x %02x %02x %02x", i / 8, + u8arr[i + 0], u8arr[i + 1], u8arr[i + 2], u8arr[i + 3], + u8arr[i + 4], u8arr[i + 5], u8arr[i + 6], u8arr[i + 7]); + } +} + +static inline void +fnic_debug_dump_fc_frame(struct fnic *fnic, struct fc_frame_header *fchdr, + int len, char *pfx) +{ + uint32_t s_id, d_id; + + s_id = ntoh24(fchdr->fh_s_id); + d_id = ntoh24(fchdr->fh_d_id); + FNIC_FCS_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "%s packet contents: sid/did/type/oxid = 0x%x/0x%x/0x%x/0x%x (len = %d)\n", + pfx, s_id, d_id, fchdr->fh_type, + FNIC_STD_GET_OX_ID(fchdr), len); + + fnic_debug_dump(fnic, (uint8_t *)fchdr, len); + +} +#else /* FNIC_DEBUG */ +static inline void +fnic_debug_dump(struct fnic *fnic, uint8_t *u8arr, int len) {} +static inline void +fnic_debug_dump_fc_frame(struct fnic *fnic, struct fc_frame_header *fchdr, + uint32_t len, char *pfx) {} +#endif /* FNIC_DEBUG */ #endif /* _FNIC_H_ */ diff --git a/drivers/scsi/fnic/fnic_attrs.c b/drivers/scsi/fnic/fnic_attrs.c index 0c5e57c7e322..705718f0809b 100644 --- a/drivers/scsi/fnic/fnic_attrs.c +++ b/drivers/scsi/fnic/fnic_attrs.c @@ -11,8 +11,8 @@ static ssize_t fnic_show_state(struct device *dev, struct device_attribute *attr, char *buf) { - struct fc_lport *lp = shost_priv(class_to_shost(dev)); - struct fnic *fnic = lport_priv(lp); + struct fnic *fnic = + *((struct fnic **) shost_priv(class_to_shost(dev))); return sysfs_emit(buf, "%s\n", fnic_state_str[fnic->state]); } @@ -26,9 +26,13 @@ static ssize_t fnic_show_drv_version(struct device *dev, static ssize_t fnic_show_link_state(struct device *dev, struct device_attribute *attr, char *buf) { - struct fc_lport *lp = shost_priv(class_to_shost(dev)); + struct fnic *fnic = + *((struct fnic **) shost_priv(class_to_shost(dev))); - return sysfs_emit(buf, "%s\n", (lp->link_up) ? "Link Up" : "Link Down"); + return sysfs_emit(buf, "%s\n", + ((fnic->iport.state != FNIC_IPORT_STATE_INIT) && + (fnic->iport.state != FNIC_IPORT_STATE_LINK_WAIT)) ? 
+ "Link Up" : "Link Down"); } static DEVICE_ATTR(fnic_state, S_IRUGO, fnic_show_state, NULL); diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c index 2619a2d4f5f1..5767862ae42f 100644 --- a/drivers/scsi/fnic/fnic_debugfs.c +++ b/drivers/scsi/fnic/fnic_debugfs.c @@ -7,6 +7,9 @@ #include <linux/vmalloc.h> #include "fnic.h" +extern int fnic_get_debug_info(struct stats_debug_info *debug_buffer, + struct fnic *fnic); + static struct dentry *fnic_trace_debugfs_root; static struct dentry *fnic_trace_debugfs_file; static struct dentry *fnic_trace_enable; @@ -593,6 +596,7 @@ static int fnic_stats_debugfs_open(struct inode *inode, debug->buf_size = buf_size; memset((void *)debug->debug_buffer, 0, buf_size); debug->buffer_len = fnic_get_stats_data(debug, fnic_stats); + debug->buffer_len += fnic_get_debug_info(debug, fnic); file->private_data = debug; @@ -673,26 +677,25 @@ static const struct file_operations fnic_reset_debugfs_fops = { * It will create file stats and reset_stats under statistics/host# directory * to log per fnic stats. */ -void fnic_stats_debugfs_init(struct fnic *fnic) +int fnic_stats_debugfs_init(struct fnic *fnic) { char name[16]; - snprintf(name, sizeof(name), "host%d", fnic->lport->host->host_no); + snprintf(name, sizeof(name), "host%d", fnic->host->host_no); fnic->fnic_stats_debugfs_host = debugfs_create_dir(name, fnic_stats_debugfs_root); - fnic->fnic_stats_debugfs_file = debugfs_create_file("stats", S_IFREG|S_IRUGO|S_IWUSR, fnic->fnic_stats_debugfs_host, fnic, &fnic_stats_debugfs_fops); - fnic->fnic_reset_debugfs_file = debugfs_create_file("reset_stats", S_IFREG|S_IRUGO|S_IWUSR, fnic->fnic_stats_debugfs_host, fnic, &fnic_reset_debugfs_fops); + return 0; } /* diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c index a08293b2ad9f..1e8cd64f9a5c 100644 --- a/drivers/scsi/fnic/fnic_fcs.c +++ b/drivers/scsi/fnic/fnic_fcs.c @@ -14,701 +14,379 @@ #include <linux/workqueue.h> #include <scsi/fc/fc_fip.h> #include <scsi/fc/fc_els.h> -#include <scsi/fc/fc_fcoe.h> #include <scsi/fc_frame.h> -#include <scsi/libfc.h> +#include <linux/etherdevice.h> +#include <scsi/scsi_transport_fc.h> #include "fnic_io.h" #include "fnic.h" -#include "fnic_fip.h" +#include "fnic_fdls.h" +#include "fdls_fc.h" #include "cq_enet_desc.h" #include "cq_exch_desc.h" +#include "fip.h" + +#define MAX_RESET_WAIT_COUNT 64 -static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS; -struct workqueue_struct *fnic_fip_queue; struct workqueue_struct *fnic_event_queue; -static void fnic_set_eth_mode(struct fnic *); -static void fnic_fcoe_send_vlan_req(struct fnic *fnic); -static void fnic_fcoe_start_fcf_disc(struct fnic *fnic); -static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *); -static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag); -static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb); +static uint8_t FCOE_ALL_FCF_MAC[6] = FC_FCOE_FLOGI_MAC; -void fnic_handle_link(struct work_struct *work) +/* + * Internal Functions + * This function will initialize the src_mac address to be + * used in outgoing frames + */ +static inline void fnic_fdls_set_fcoe_srcmac(struct fnic *fnic, + uint8_t *src_mac) { - struct fnic *fnic = container_of(work, struct fnic, link_work); - unsigned long flags; - int old_link_status; - u32 old_link_down_cnt; - u64 old_port_speed, new_port_speed; - - spin_lock_irqsave(&fnic->fnic_lock, flags); - - fnic->link_events = 1; /* less work to just set everytime*/ - - if (fnic->stop_rx_link_events) { - 
spin_unlock_irqrestore(&fnic->fnic_lock, flags); - return; - } - - old_link_down_cnt = fnic->link_down_cnt; - old_link_status = fnic->link_status; - old_port_speed = atomic64_read( - &fnic->fnic_stats.misc_stats.current_port_speed); - - fnic->link_status = vnic_dev_link_status(fnic->vdev); - fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev); - - new_port_speed = vnic_dev_port_speed(fnic->vdev); - atomic64_set(&fnic->fnic_stats.misc_stats.current_port_speed, - new_port_speed); - if (old_port_speed != new_port_speed) - FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, - "Current vnic speed set to: %llu\n", - new_port_speed); - - switch (vnic_dev_port_speed(fnic->vdev)) { - case DCEM_PORTSPEED_10G: - fc_host_speed(fnic->lport->host) = FC_PORTSPEED_10GBIT; - fnic->lport->link_supported_speeds = FC_PORTSPEED_10GBIT; - break; - case DCEM_PORTSPEED_20G: - fc_host_speed(fnic->lport->host) = FC_PORTSPEED_20GBIT; - fnic->lport->link_supported_speeds = FC_PORTSPEED_20GBIT; - break; - case DCEM_PORTSPEED_25G: - fc_host_speed(fnic->lport->host) = FC_PORTSPEED_25GBIT; - fnic->lport->link_supported_speeds = FC_PORTSPEED_25GBIT; - break; - case DCEM_PORTSPEED_40G: - case DCEM_PORTSPEED_4x10G: - fc_host_speed(fnic->lport->host) = FC_PORTSPEED_40GBIT; - fnic->lport->link_supported_speeds = FC_PORTSPEED_40GBIT; - break; - case DCEM_PORTSPEED_100G: - fc_host_speed(fnic->lport->host) = FC_PORTSPEED_100GBIT; - fnic->lport->link_supported_speeds = FC_PORTSPEED_100GBIT; - break; - default: - fc_host_speed(fnic->lport->host) = FC_PORTSPEED_UNKNOWN; - fnic->lport->link_supported_speeds = FC_PORTSPEED_UNKNOWN; - break; - } - - if (old_link_status == fnic->link_status) { - if (!fnic->link_status) { - /* DOWN -> DOWN */ - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - fnic_fc_trace_set_data(fnic->lport->host->host_no, - FNIC_FC_LE, "Link Status: DOWN->DOWN", - strlen("Link Status: DOWN->DOWN")); - FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, - "down->down\n"); - } else { - if (old_link_down_cnt != fnic->link_down_cnt) { - /* UP -> DOWN -> UP */ - fnic->lport->host_stats.link_failure_count++; - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - fnic_fc_trace_set_data( - fnic->lport->host->host_no, - FNIC_FC_LE, - "Link Status:UP_DOWN_UP", - strlen("Link_Status:UP_DOWN_UP") - ); - FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, - "link down\n"); - fcoe_ctlr_link_down(&fnic->ctlr); - if (fnic->config.flags & VFCF_FIP_CAPABLE) { - /* start FCoE VLAN discovery */ - fnic_fc_trace_set_data( - fnic->lport->host->host_no, - FNIC_FC_LE, - "Link Status: UP_DOWN_UP_VLAN", - strlen( - "Link Status: UP_DOWN_UP_VLAN") - ); - fnic_fcoe_send_vlan_req(fnic); - return; - } - FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, - "up->down->up: Link up\n"); - fcoe_ctlr_link_up(&fnic->ctlr); - } else { - /* UP -> UP */ - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - fnic_fc_trace_set_data( - fnic->lport->host->host_no, FNIC_FC_LE, - "Link Status: UP_UP", - strlen("Link Status: UP_UP")); - FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, - "up->up\n"); - } - } - } else if (fnic->link_status) { - /* DOWN -> UP */ - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - if (fnic->config.flags & VFCF_FIP_CAPABLE) { - /* start FCoE VLAN discovery */ - fnic_fc_trace_set_data(fnic->lport->host->host_no, - FNIC_FC_LE, "Link Status: DOWN_UP_VLAN", - strlen("Link Status: DOWN_UP_VLAN")); - fnic_fcoe_send_vlan_req(fnic); - - return; - } - - FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, 
fnic->fnic_num, - "down->up: Link up\n"); - fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_LE, - "Link Status: DOWN_UP", strlen("Link Status: DOWN_UP")); - fcoe_ctlr_link_up(&fnic->ctlr); - } else { - /* UP -> DOWN */ - fnic->lport->host_stats.link_failure_count++; - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, - "up->down: Link down\n"); - fnic_fc_trace_set_data( - fnic->lport->host->host_no, FNIC_FC_LE, - "Link Status: UP_DOWN", - strlen("Link Status: UP_DOWN")); - if (fnic->config.flags & VFCF_FIP_CAPABLE) { - FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, - "deleting fip-timer during link-down\n"); - del_timer_sync(&fnic->fip_timer); - } - fcoe_ctlr_link_down(&fnic->ctlr); - } + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Setting src mac: %02x:%02x:%02x:%02x:%02x:%02x", + src_mac[0], src_mac[1], src_mac[2], src_mac[3], + src_mac[4], src_mac[5]); + memcpy(fnic->iport.fpma, src_mac, 6); } /* - * This function passes incoming fabric frames to libFC + * This function will initialize the dst_mac address to be + * used in outgoing frames */ -void fnic_handle_frame(struct work_struct *work) +static inline void fnic_fdls_set_fcoe_dstmac(struct fnic *fnic, + uint8_t *dst_mac) { - struct fnic *fnic = container_of(work, struct fnic, frame_work); - struct fc_lport *lp = fnic->lport; - unsigned long flags; - struct sk_buff *skb; - struct fc_frame *fp; - - while ((skb = skb_dequeue(&fnic->frame_queue))) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Setting dst mac: %02x:%02x:%02x:%02x:%02x:%02x", + dst_mac[0], dst_mac[1], dst_mac[2], dst_mac[3], + dst_mac[4], dst_mac[5]); - spin_lock_irqsave(&fnic->fnic_lock, flags); - if (fnic->stop_rx_link_events) { - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - dev_kfree_skb(skb); - return; - } - fp = (struct fc_frame *)skb; - - /* - * If we're in a transitional state, just re-queue and return. - * The queue will be serviced when we get to a stable state. 
- */ - if (fnic->state != FNIC_IN_FC_MODE && - fnic->state != FNIC_IN_ETH_MODE) { - skb_queue_head(&fnic->frame_queue, skb); - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - return; - } - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - - fc_exch_recv(lp, fp); - } + memcpy(fnic->iport.fcfmac, dst_mac, 6); } -void fnic_fcoe_evlist_free(struct fnic *fnic) +void fnic_get_host_port_state(struct Scsi_Host *shost) { - struct fnic_event *fevt = NULL; - struct fnic_event *next = NULL; + struct fnic *fnic = *((struct fnic **) shost_priv(shost)); + struct fnic_iport_s *iport = &fnic->iport; unsigned long flags; spin_lock_irqsave(&fnic->fnic_lock, flags); - if (list_empty(&fnic->evlist)) { - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - return; - } - - list_for_each_entry_safe(fevt, next, &fnic->evlist, list) { - list_del(&fevt->list); - kfree(fevt); - } + if (!fnic->link_status) + fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN; + else if (iport->state == FNIC_IPORT_STATE_READY) + fc_host_port_state(shost) = FC_PORTSTATE_ONLINE; + else + fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE; spin_unlock_irqrestore(&fnic->fnic_lock, flags); } -void fnic_handle_event(struct work_struct *work) +void fnic_fdls_link_status_change(struct fnic *fnic, int linkup) { - struct fnic *fnic = container_of(work, struct fnic, event_work); - struct fnic_event *fevt = NULL; - struct fnic_event *next = NULL; - unsigned long flags; + struct fnic_iport_s *iport = &fnic->iport; - spin_lock_irqsave(&fnic->fnic_lock, flags); - if (list_empty(&fnic->evlist)) { - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - return; - } + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "link up: %d, usefip: %d", linkup, iport->usefip); - list_for_each_entry_safe(fevt, next, &fnic->evlist, list) { - if (fnic->stop_rx_link_events) { - list_del(&fevt->list); - kfree(fevt); - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - return; - } - /* - * If we're in a transitional state, just re-queue and return. - * The queue will be serviced when we get to a stable state. 
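Both the removed workers here and their FDLS replacements below share one deferral rule: while the fnic is switching between Ethernet and FC mode, queued work is left on the list and picked up again once a stable state is reached. A minimal sketch of that guard; fnic_stable_state() is a hypothetical helper, not a driver function:

        static inline bool fnic_stable_state(struct fnic *fnic)
        {
                /* only these two states allow frame/event processing */
                return fnic->state == FNIC_IN_FC_MODE ||
                       fnic->state == FNIC_IN_ETH_MODE;
        }
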
- */ - if (fnic->state != FNIC_IN_FC_MODE && - fnic->state != FNIC_IN_ETH_MODE) { - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - return; - } + spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); - list_del(&fevt->list); - switch (fevt->event) { - case FNIC_EVT_START_VLAN_DISC: - spin_unlock_irqrestore(&fnic->fnic_lock, flags); + if (linkup) { + if (iport->usefip) { + iport->state = FNIC_IPORT_STATE_FIP; + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "link up: %d, usefip: %d", linkup, iport->usefip); fnic_fcoe_send_vlan_req(fnic); - spin_lock_irqsave(&fnic->fnic_lock, flags); - break; - case FNIC_EVT_START_FCF_DISC: - FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, - "Start FCF Discovery\n"); - fnic_fcoe_start_fcf_disc(fnic); - break; - default: - FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, - "Unknown event 0x%x\n", fevt->event); - break; + } else { + iport->state = FNIC_IPORT_STATE_FABRIC_DISC; + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "iport->state: %d", iport->state); + fnic_fdls_disc_start(iport); } - kfree(fevt); + } else { + iport->state = FNIC_IPORT_STATE_LINK_WAIT; + if (!is_zero_ether_addr(iport->fpma)) + vnic_dev_del_addr(fnic->vdev, iport->fpma); + fnic_common_fip_cleanup(fnic); + fnic_fdls_link_down(iport); + } - spin_unlock_irqrestore(&fnic->fnic_lock, flags); + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); } -/** - * is_fnic_fip_flogi_reject() - Check if the Received FIP FLOGI frame is rejected - * @fip: The FCoE controller that received the frame - * @skb: The received FIP frame - * - * Returns non-zero if the frame is rejected with unsupported cmd with - * insufficient resource els explanation. + +/* + * FPMA can be either taken from ethhdr(dst_mac) or flogi resp + * or derive from FC_MAP and FCID combination. While it should be + * same, revisit this if there is any possibility of not-correct. 
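The comment above ends by noting that the FPMA can also be derived from the FC_MAP and FCID combination; a minimal sketch of that derivation, mirroring the fcmac[] construction in fnic_fdls_learn_fcoe_macs() below (fpma_from_fcid() is hypothetical, and 0E:FC:00 is assumed to be the default FC-MAP):

        static void fpma_from_fcid(uint8_t fpma[6], const uint8_t fcid[3])
        {
                /* FC-MAP prefix in the upper three octets */
                static const uint8_t fc_map[3] = { 0x0E, 0xFC, 0x00 };

                memcpy(fpma, fc_map, 3);
                /* 24-bit FC_ID in the lower three octets */
                memcpy(fpma + 3, fcid, 3);
        }
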
*/ -static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip, - struct sk_buff *skb) +void fnic_fdls_learn_fcoe_macs(struct fnic_iport_s *iport, void *rx_frame, + uint8_t *fcid) { - struct fc_lport *lport = fip->lp; - struct fip_header *fiph; - struct fc_frame_header *fh = NULL; - struct fip_desc *desc; - struct fip_encaps *els; - u16 op; - u8 els_op; - u8 sub; - - size_t rlen; - size_t dlen = 0; - - if (skb_linearize(skb)) - return 0; + struct fnic *fnic = iport->fnic; + struct ethhdr *ethhdr = (struct ethhdr *) rx_frame; + uint8_t fcmac[6] = { 0x0E, 0xFC, 0x00, 0x00, 0x00, 0x00 }; - if (skb->len < sizeof(*fiph)) - return 0; + memcpy(&fcmac[3], fcid, 3); - fiph = (struct fip_header *)skb->data; - op = ntohs(fiph->fip_op); - sub = fiph->fip_subcode; + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "learn fcoe: dst_mac: %02x:%02x:%02x:%02x:%02x:%02x", + ethhdr->h_dest[0], ethhdr->h_dest[1], + ethhdr->h_dest[2], ethhdr->h_dest[3], + ethhdr->h_dest[4], ethhdr->h_dest[5]); - if (op != FIP_OP_LS) - return 0; + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "learn fcoe: fc_mac: %02x:%02x:%02x:%02x:%02x:%02x", + fcmac[0], fcmac[1], fcmac[2], fcmac[3], fcmac[4], + fcmac[5]); - if (sub != FIP_SC_REP) - return 0; - - rlen = ntohs(fiph->fip_dl_len) * 4; - if (rlen + sizeof(*fiph) > skb->len) - return 0; - - desc = (struct fip_desc *)(fiph + 1); - dlen = desc->fip_dlen * FIP_BPW; + fnic_fdls_set_fcoe_srcmac(fnic, fcmac); + fnic_fdls_set_fcoe_dstmac(fnic, ethhdr->h_source); +} - if (desc->fip_dtype == FIP_DT_FLOGI) { +void fnic_fdls_init(struct fnic *fnic, int usefip) +{ + struct fnic_iport_s *iport = &fnic->iport; - if (dlen < sizeof(*els) + sizeof(*fh) + 1) - return 0; + /* Initialize iPort structure */ + iport->state = FNIC_IPORT_STATE_INIT; + iport->fnic = fnic; + iport->usefip = usefip; - els = (struct fip_encaps *)desc; - fh = (struct fc_frame_header *)(els + 1); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "iportsrcmac: %02x:%02x:%02x:%02x:%02x:%02x", + iport->hwmac[0], iport->hwmac[1], iport->hwmac[2], + iport->hwmac[3], iport->hwmac[4], iport->hwmac[5]); - if (!fh) - return 0; + INIT_LIST_HEAD(&iport->tport_list); + INIT_LIST_HEAD(&iport->tport_list_pending_del); - /* - * ELS command code, reason and explanation should be = Reject, - * unsupported command and insufficient resource - */ - els_op = *(u8 *)(fh + 1); - if (els_op == ELS_LS_RJT) { - shost_printk(KERN_INFO, lport->host, - "Flogi Request Rejected by Switch\n"); - return 1; - } - shost_printk(KERN_INFO, lport->host, - "Flogi Request Accepted by Switch\n"); - } - return 0; + fnic_fdls_disc_init(iport); } -static void fnic_fcoe_send_vlan_req(struct fnic *fnic) +void fnic_handle_link(struct work_struct *work) { - struct fcoe_ctlr *fip = &fnic->ctlr; - struct fnic_stats *fnic_stats = &fnic->fnic_stats; - struct sk_buff *skb; - char *eth_fr; - struct fip_vlan *vlan; - u64 vlan_tov; + struct fnic *fnic = container_of(work, struct fnic, link_work); + int old_link_status; + u32 old_link_down_cnt; + int max_count = 0; - fnic_fcoe_reset_vlans(fnic); - fnic->set_vlan(fnic, 0); + if (vnic_dev_get_intr_mode(fnic->vdev) != VNIC_DEV_INTR_MODE_MSI) + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Interrupt mode is not MSI\n"); - if (printk_ratelimit()) - FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, - "Sending VLAN request...\n"); + spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); - skb = dev_alloc_skb(sizeof(struct fip_vlan)); - if (!skb) + if (fnic->stop_rx_link_events) { + 
spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Stop link rx events\n"); return; - - eth_fr = (char *)skb->data; - vlan = (struct fip_vlan *)eth_fr; - - memset(vlan, 0, sizeof(*vlan)); - memcpy(vlan->eth.h_source, fip->ctl_src_addr, ETH_ALEN); - memcpy(vlan->eth.h_dest, fcoe_all_fcfs, ETH_ALEN); - vlan->eth.h_proto = htons(ETH_P_FIP); - - vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER); - vlan->fip.fip_op = htons(FIP_OP_VLAN); - vlan->fip.fip_subcode = FIP_SC_VL_REQ; - vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW); - - vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC; - vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW; - memcpy(&vlan->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN); - - vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME; - vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW; - put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn); - atomic64_inc(&fnic_stats->vlan_stats.vlan_disc_reqs); - - skb_put(skb, sizeof(*vlan)); - skb->protocol = htons(ETH_P_FIP); - skb_reset_mac_header(skb); - skb_reset_network_header(skb); - fip->send(fip, skb); - - /* set a timer so that we can retry if there no response */ - vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV); - mod_timer(&fnic->fip_timer, round_jiffies(vlan_tov)); -} - -static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb) -{ - struct fcoe_ctlr *fip = &fnic->ctlr; - struct fip_header *fiph; - struct fip_desc *desc; - struct fnic_stats *fnic_stats = &fnic->fnic_stats; - u16 vid; - size_t rlen; - size_t dlen; - struct fcoe_vlan *vlan; - u64 sol_time; - unsigned long flags; - - FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, - "Received VLAN response...\n"); - - fiph = (struct fip_header *) skb->data; - - FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, - "Received VLAN response... OP 0x%x SUB_OP 0x%x\n", - ntohs(fiph->fip_op), fiph->fip_subcode); - - rlen = ntohs(fiph->fip_dl_len) * 4; - fnic_fcoe_reset_vlans(fnic); - spin_lock_irqsave(&fnic->vlans_lock, flags); - desc = (struct fip_desc *)(fiph + 1); - while (rlen > 0) { - dlen = desc->fip_dlen * FIP_BPW; - switch (desc->fip_dtype) { - case FIP_DT_VLAN: - vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan); - shost_printk(KERN_INFO, fnic->lport->host, - "process_vlan_resp: FIP VLAN %d\n", vid); - vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC); - if (!vlan) { - /* retry from timer */ - spin_unlock_irqrestore(&fnic->vlans_lock, - flags); - goto out; - } - vlan->vid = vid & 0x0fff; - vlan->state = FIP_VLAN_AVAIL; - list_add_tail(&vlan->list, &fnic->vlans); - break; - } - desc = (struct fip_desc *)((char *)desc + dlen); - rlen -= dlen; } - /* any VLAN descriptors present ? */ - if (list_empty(&fnic->vlans)) { - /* retry from timer */ - atomic64_inc(&fnic_stats->vlan_stats.resp_withno_vlanID); - FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, - "No VLAN descriptors in FIP VLAN response\n"); - spin_unlock_irqrestore(&fnic->vlans_lock, flags); - goto out; + /* Do not process if the fnic is already in transitional state */ + if ((fnic->state != FNIC_IN_ETH_MODE) + && (fnic->state != FNIC_IN_FC_MODE)) { + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fnic in transitional state: %d. 
link up: %d ignored", + fnic->state, vnic_dev_link_status(fnic->vdev)); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Current link status: %d iport state: %d\n", + fnic->link_status, fnic->iport.state); + return; } - vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); - fnic->set_vlan(fnic, vlan->vid); - vlan->state = FIP_VLAN_SENT; /* sent now */ - vlan->sol_count++; - spin_unlock_irqrestore(&fnic->vlans_lock, flags); - - /* start the solicitation */ - fcoe_ctlr_link_up(fip); - - sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY); - mod_timer(&fnic->fip_timer, round_jiffies(sol_time)); -out: - return; -} - -static void fnic_fcoe_start_fcf_disc(struct fnic *fnic) -{ - unsigned long flags; - struct fcoe_vlan *vlan; - u64 sol_time; - - spin_lock_irqsave(&fnic->vlans_lock, flags); - vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); - fnic->set_vlan(fnic, vlan->vid); - vlan->state = FIP_VLAN_SENT; /* sent now */ - vlan->sol_count = 1; - spin_unlock_irqrestore(&fnic->vlans_lock, flags); - - /* start the solicitation */ - fcoe_ctlr_link_up(&fnic->ctlr); - - sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY); - mod_timer(&fnic->fip_timer, round_jiffies(sol_time)); -} - -static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag) -{ - unsigned long flags; - struct fcoe_vlan *fvlan; + old_link_down_cnt = fnic->link_down_cnt; + old_link_status = fnic->link_status; + fnic->link_status = vnic_dev_link_status(fnic->vdev); + fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev); - spin_lock_irqsave(&fnic->vlans_lock, flags); - if (list_empty(&fnic->vlans)) { - spin_unlock_irqrestore(&fnic->vlans_lock, flags); - return -EINVAL; + while (fnic->reset_in_progress == IN_PROGRESS) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fnic reset in progress. Link event needs to wait\n"); + + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "waiting for reset completion\n"); + wait_for_completion_timeout(&fnic->reset_completion_wait, + msecs_to_jiffies(5000)); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "woken up from reset completion wait\n"); + spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); + + max_count++; + if (max_count >= MAX_RESET_WAIT_COUNT) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Rstth waited for too long. 
Skipping handle link event\n"); + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + return; + } } - - fvlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); - if (fvlan->state == FIP_VLAN_USED) { - spin_unlock_irqrestore(&fnic->vlans_lock, flags); - return 0; + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Marking fnic reset in progress\n"); + fnic->reset_in_progress = IN_PROGRESS; + + if ((vnic_dev_get_intr_mode(fnic->vdev) != VNIC_DEV_INTR_MODE_MSI) || + (fnic->link_status != old_link_status)) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "old link status: %d link status: %d\n", + old_link_status, (int) fnic->link_status); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "old down count %d down count: %d\n", + old_link_down_cnt, (int) fnic->link_down_cnt); } - if (fvlan->state == FIP_VLAN_SENT) { - fvlan->state = FIP_VLAN_USED; - spin_unlock_irqrestore(&fnic->vlans_lock, flags); - return 0; + if (old_link_status == fnic->link_status) { + if (!fnic->link_status) { + /* DOWN -> DOWN */ + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "down->down\n"); + } else { + if (old_link_down_cnt != fnic->link_down_cnt) { + /* UP -> DOWN -> UP */ + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "up->down. Link down\n"); + fnic_fdls_link_status_change(fnic, 0); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "down->up. Link up\n"); + fnic_fdls_link_status_change(fnic, 1); + } else { + /* UP -> UP */ + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "up->up\n"); + } + } + } else if (fnic->link_status) { + /* DOWN -> UP */ + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "down->up. Link up\n"); + fnic_fdls_link_status_change(fnic, 1); + } else { + /* UP -> DOWN */ + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "up->down. 
Link down\n"); + fnic_fdls_link_status_change(fnic, 0); } - spin_unlock_irqrestore(&fnic->vlans_lock, flags); - return -EINVAL; -} -static void fnic_event_enq(struct fnic *fnic, enum fnic_evt ev) -{ - struct fnic_event *fevt; - unsigned long flags; - - fevt = kmalloc(sizeof(*fevt), GFP_ATOMIC); - if (!fevt) - return; + spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); + fnic->reset_in_progress = NOT_IN_PROGRESS; + complete(&fnic->reset_completion_wait); - fevt->fnic = fnic; - fevt->event = ev; - - spin_lock_irqsave(&fnic->fnic_lock, flags); - list_add_tail(&fevt->list, &fnic->evlist); - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - - schedule_work(&fnic->event_work); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Marking fnic reset completion\n"); + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); } -static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb) +void fnic_handle_frame(struct work_struct *work) { - struct fip_header *fiph; - int ret = 1; - u16 op; - u8 sub; + struct fnic *fnic = container_of(work, struct fnic, frame_work); + struct fnic_frame_list *cur_frame, *next; + int fchdr_offset = 0; - if (!skb || !(skb->data)) - return -1; + spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); + list_for_each_entry_safe(cur_frame, next, &fnic->frame_queue, links) { + if (fnic->stop_rx_link_events) { + list_del(&cur_frame->links); + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + kfree(cur_frame->fp); + mempool_free(cur_frame, fnic->frame_elem_pool); + return; + } - if (skb_linearize(skb)) - goto drop; + /* + * If we're in a transitional state, just re-queue and return. + * The queue will be serviced when we get to a stable state. + */ + if (fnic->state != FNIC_IN_FC_MODE && + fnic->state != FNIC_IN_ETH_MODE) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Cannot process frame in transitional state\n"); + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + return; + } - fiph = (struct fip_header *)skb->data; - op = ntohs(fiph->fip_op); - sub = fiph->fip_subcode; + list_del(&cur_frame->links); - if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER) - goto drop; + /* Frames from FCP_RQ will have ethhdrs stripped off */ + fchdr_offset = (cur_frame->rx_ethhdr_stripped) ? 
+ 0 : FNIC_ETH_FCOE_HDRS_OFFSET; - if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len) - goto drop; + fnic_fdls_recv_frame(&fnic->iport, cur_frame->fp, + cur_frame->frame_len, fchdr_offset); - if (op == FIP_OP_DISC && sub == FIP_SC_ADV) { - if (fnic_fcoe_vlan_check(fnic, ntohs(fiph->fip_flags))) - goto drop; - /* pass it on to fcoe */ - ret = 1; - } else if (op == FIP_OP_VLAN && sub == FIP_SC_VL_NOTE) { - /* set the vlan as used */ - fnic_fcoe_process_vlan_resp(fnic, skb); - ret = 0; - } else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) { - /* received CVL request, restart vlan disc */ - fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); - /* pass it on to fcoe */ - ret = 1; + kfree(cur_frame->fp); + mempool_free(cur_frame, fnic->frame_elem_pool); } -drop: - return ret; + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); } void fnic_handle_fip_frame(struct work_struct *work) { + struct fnic_frame_list *cur_frame, *next; struct fnic *fnic = container_of(work, struct fnic, fip_frame_work); - struct fnic_stats *fnic_stats = &fnic->fnic_stats; - unsigned long flags; - struct sk_buff *skb; - struct ethhdr *eh; - while ((skb = skb_dequeue(&fnic->fip_frame_queue))) { - spin_lock_irqsave(&fnic->fnic_lock, flags); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Processing FIP frame\n"); + + spin_lock_irqsave(&fnic->fnic_lock, fnic->lock_flags); + list_for_each_entry_safe(cur_frame, next, &fnic->fip_frame_queue, + links) { if (fnic->stop_rx_link_events) { - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - dev_kfree_skb(skb); + list_del(&cur_frame->links); + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); + kfree(cur_frame->fp); + kfree(cur_frame); return; } + /* * If we're in a transitional state, just re-queue and return. * The queue will be serviced when we get to a stable state. */ if (fnic->state != FNIC_IN_FC_MODE && - fnic->state != FNIC_IN_ETH_MODE) { - skb_queue_head(&fnic->fip_frame_queue, skb); - spin_unlock_irqrestore(&fnic->fnic_lock, flags); + fnic->state != FNIC_IN_ETH_MODE) { + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); return; } - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - eh = (struct ethhdr *)skb->data; - if (eh->h_proto == htons(ETH_P_FIP)) { - skb_pull(skb, sizeof(*eh)); - if (fnic_fcoe_handle_fip_frame(fnic, skb) <= 0) { - dev_kfree_skb(skb); - continue; - } - /* - * If there's FLOGI rejects - clear all - * fcf's & restart from scratch - */ - if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) { - atomic64_inc( - &fnic_stats->vlan_stats.flogi_rejects); - shost_printk(KERN_INFO, fnic->lport->host, - "Trigger a Link down - VLAN Disc\n"); - fcoe_ctlr_link_down(&fnic->ctlr); - /* start FCoE VLAN discovery */ - fnic_fcoe_send_vlan_req(fnic); - dev_kfree_skb(skb); - continue; - } - fcoe_ctlr_recv(&fnic->ctlr, skb); - continue; + + list_del(&cur_frame->links); + + if (fdls_fip_recv_frame(fnic, cur_frame->fp)) { + kfree(cur_frame->fp); + kfree(cur_frame); } } + spin_unlock_irqrestore(&fnic->fnic_lock, fnic->lock_flags); } /** * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame. * @fnic: fnic instance. - * @skb: Ethernet Frame. + * @fp: Ethernet Frame. */ -static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb) +static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, void *fp) { - struct fc_frame *fp; struct ethhdr *eh; - struct fcoe_hdr *fcoe_hdr; - struct fcoe_crc_eof *ft; + struct fnic_frame_list *fip_fr_elem; + unsigned long flags; - /* - * Undo VLAN encapsulation if present. 
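A condensed view of the receive demux implemented around fnic_import_rq_eth_pkt() above: FIP frames are queued to the FIP worker only while FIP is in use, FCoE frames stay on the fast path, and everything else is dropped. fnic_rx_classify() is a hypothetical summary, not driver code:

        /* 1: hand to FIP worker, 0: FCoE fast path, -1: drop */
        static int fnic_rx_classify(struct fnic *fnic, struct ethhdr *eh)
        {
                if (eh->h_proto == cpu_to_be16(ETH_P_FIP))
                        return fnic->iport.usefip ? 1 : -1;
                if (eh->h_proto == cpu_to_be16(ETH_P_FCOE))
                        return 0;
                return -1;
        }
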
- */ - eh = (struct ethhdr *)skb->data; - if (eh->h_proto == htons(ETH_P_8021Q)) { - memmove((u8 *)eh + VLAN_HLEN, eh, ETH_ALEN * 2); - eh = skb_pull(skb, VLAN_HLEN); - skb_reset_mac_header(skb); - } - if (eh->h_proto == htons(ETH_P_FIP)) { - if (!(fnic->config.flags & VFCF_FIP_CAPABLE)) { - printk(KERN_ERR "Dropped FIP frame, as firmware " - "uses non-FIP mode, Enable FIP " - "using UCSM\n"); - goto drop; - } - if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, - FNIC_FC_RECV|0x80, (char *)skb->data, skb->len)) != 0) { - printk(KERN_ERR "fnic ctlr frame trace error!!!"); - } - skb_queue_tail(&fnic->fip_frame_queue, skb); + eh = (struct ethhdr *) fp; + if ((eh->h_proto == cpu_to_be16(ETH_P_FIP)) && (fnic->iport.usefip)) { + fip_fr_elem = (struct fnic_frame_list *) + kzalloc(sizeof(struct fnic_frame_list), GFP_ATOMIC); + if (!fip_fr_elem) + return 0; + fip_fr_elem->fp = fp; + spin_lock_irqsave(&fnic->fnic_lock, flags); + list_add_tail(&fip_fr_elem->links, &fnic->fip_frame_queue); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); queue_work(fnic_fip_queue, &fnic->fip_frame_work); - return 1; /* let caller know packet was used */ - } - if (eh->h_proto != htons(ETH_P_FCOE)) - goto drop; - skb_set_network_header(skb, sizeof(*eh)); - skb_pull(skb, sizeof(*eh)); - - fcoe_hdr = (struct fcoe_hdr *)skb->data; - if (FC_FCOE_DECAPS_VER(fcoe_hdr) != FC_FCOE_VER) - goto drop; - - fp = (struct fc_frame *)skb; - fc_frame_init(fp); - fr_sof(fp) = fcoe_hdr->fcoe_sof; - skb_pull(skb, sizeof(struct fcoe_hdr)); - skb_reset_transport_header(skb); - - ft = (struct fcoe_crc_eof *)(skb->data + skb->len - sizeof(*ft)); - fr_eof(fp) = ft->fcoe_eof; - skb_trim(skb, skb->len - sizeof(*ft)); - return 0; -drop: - dev_kfree_skb_irq(skb); - return -1; + return 1; /* let caller know packet was used */ + } else + return 0; } /** @@ -720,206 +398,147 @@ drop: */ void fnic_update_mac_locked(struct fnic *fnic, u8 *new) { - u8 *ctl = fnic->ctlr.ctl_src_addr; + struct fnic_iport_s *iport = &fnic->iport; + u8 *ctl = iport->hwmac; u8 *data = fnic->data_src_addr; if (is_zero_ether_addr(new)) new = ctl; if (ether_addr_equal(data, new)) return; - FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, - "update_mac %pM\n", new); + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Update MAC: %u\n", *new); + if (!is_zero_ether_addr(data) && !ether_addr_equal(data, ctl)) vnic_dev_del_addr(fnic->vdev, data); + memcpy(data, new, ETH_ALEN); if (!ether_addr_equal(new, ctl)) vnic_dev_add_addr(fnic->vdev, new); } -/** - * fnic_update_mac() - set data MAC address and filters. - * @lport: local port. - * @new: newly-assigned FCoE MAC address. - */ -void fnic_update_mac(struct fc_lport *lport, u8 *new) -{ - struct fnic *fnic = lport_priv(lport); - - spin_lock_irq(&fnic->fnic_lock); - fnic_update_mac_locked(fnic, new); - spin_unlock_irq(&fnic->fnic_lock); -} - -/** - * fnic_set_port_id() - set the port_ID after successful FLOGI. - * @lport: local port. - * @port_id: assigned FC_ID. - * @fp: received frame containing the FLOGI accept or NULL. - * - * This is called from libfc when a new FC_ID has been assigned. - * This causes us to reset the firmware to FC_MODE and setup the new MAC - * address and FC_ID. - * - * It is also called with FC_ID 0 when we're logged off. - * - * If the FC_ID is due to point-to-point, fp may be NULL. 
- */ -void fnic_set_port_id(struct fc_lport *lport, u32 port_id, struct fc_frame *fp) -{ - struct fnic *fnic = lport_priv(lport); - u8 *mac; - int ret; - - FNIC_FCS_DBG(KERN_DEBUG, lport->host, fnic->fnic_num, - "set port_id 0x%x fp 0x%p\n", - port_id, fp); - - /* - * If we're clearing the FC_ID, change to use the ctl_src_addr. - * Set ethernet mode to send FLOGI. - */ - if (!port_id) { - fnic_update_mac(lport, fnic->ctlr.ctl_src_addr); - fnic_set_eth_mode(fnic); - return; - } - - if (fp) { - mac = fr_cb(fp)->granted_mac; - if (is_zero_ether_addr(mac)) { - /* non-FIP - FLOGI already accepted - ignore return */ - fcoe_ctlr_recv_flogi(&fnic->ctlr, lport, fp); - } - fnic_update_mac(lport, mac); - } - - /* Change state to reflect transition to FC mode */ - spin_lock_irq(&fnic->fnic_lock); - if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE) - fnic->state = FNIC_IN_ETH_TRANS_FC_MODE; - else { - FNIC_FCS_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, - "Unexpected fnic state: %s processing FLOGI response", - fnic_state_to_str(fnic->state)); - spin_unlock_irq(&fnic->fnic_lock); - return; - } - spin_unlock_irq(&fnic->fnic_lock); - - /* - * Send FLOGI registration to firmware to set up FC mode. - * The new address will be set up when registration completes. - */ - ret = fnic_flogi_reg_handler(fnic, port_id); - - if (ret < 0) { - spin_lock_irq(&fnic->fnic_lock); - if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) - fnic->state = FNIC_IN_ETH_MODE; - spin_unlock_irq(&fnic->fnic_lock); - } -} - static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc *cq_desc, struct vnic_rq_buf *buf, int skipped __attribute__((unused)), void *opaque) { struct fnic *fnic = vnic_dev_priv(rq->vdev); - struct sk_buff *skb; - struct fc_frame *fp; + uint8_t *fp; struct fnic_stats *fnic_stats = &fnic->fnic_stats; + unsigned int ethhdr_stripped; u8 type, color, eop, sop, ingress_port, vlan_stripped; - u8 fcoe = 0, fcoe_sof, fcoe_eof; - u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0; - u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok; - u8 ipv6, ipv4, ipv4_fragment, rss_type, csum_not_calc; + u8 fcoe_fnic_crc_ok = 1, fcoe_enc_error = 0; u8 fcs_ok = 1, packet_error = 0; - u16 q_number, completed_index, bytes_written = 0, vlan, checksum; + u16 q_number, completed_index, vlan; u32 rss_hash; + u16 checksum; + u8 csum_not_calc, rss_type, ipv4, ipv6, ipv4_fragment; + u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok; + u8 fcoe = 0, fcoe_sof, fcoe_eof; u16 exchange_id, tmpl; u8 sof = 0; u8 eof = 0; u32 fcp_bytes_written = 0; + u16 enet_bytes_written = 0; + u32 bytes_written = 0; unsigned long flags; + struct fnic_frame_list *frame_elem = NULL; + struct ethhdr *eh; dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len, - DMA_FROM_DEVICE); - skb = buf->os_buf; - fp = (struct fc_frame *)skb; + DMA_FROM_DEVICE); + fp = (uint8_t *) buf->os_buf; buf->os_buf = NULL; cq_desc_dec(cq_desc, &type, &color, &q_number, &completed_index); if (type == CQ_DESC_TYPE_RQ_FCP) { - cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *)cq_desc, - &type, &color, &q_number, &completed_index, - &eop, &sop, &fcoe_fc_crc_ok, &exchange_id, - &tmpl, &fcp_bytes_written, &sof, &eof, - &ingress_port, &packet_error, - &fcoe_enc_error, &fcs_ok, &vlan_stripped, - &vlan); - skb_trim(skb, fcp_bytes_written); - fr_sof(fp) = sof; - fr_eof(fp) = eof; - + cq_fcp_rq_desc_dec((struct cq_fcp_rq_desc *) cq_desc, &type, + &color, &q_number, &completed_index, &eop, &sop, + &fcoe_fnic_crc_ok, &exchange_id, &tmpl, + &fcp_bytes_written, &sof, &eof, &ingress_port, + 
&packet_error, &fcoe_enc_error, &fcs_ok, + &vlan_stripped, &vlan); + ethhdr_stripped = 1; + bytes_written = fcp_bytes_written; } else if (type == CQ_DESC_TYPE_RQ_ENET) { - cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc, - &type, &color, &q_number, &completed_index, - &ingress_port, &fcoe, &eop, &sop, - &rss_type, &csum_not_calc, &rss_hash, - &bytes_written, &packet_error, - &vlan_stripped, &vlan, &checksum, - &fcoe_sof, &fcoe_fc_crc_ok, - &fcoe_enc_error, &fcoe_eof, - &tcp_udp_csum_ok, &udp, &tcp, - &ipv4_csum_ok, &ipv6, &ipv4, - &ipv4_fragment, &fcs_ok); - skb_trim(skb, bytes_written); + cq_enet_rq_desc_dec((struct cq_enet_rq_desc *) cq_desc, &type, + &color, &q_number, &completed_index, + &ingress_port, &fcoe, &eop, &sop, &rss_type, + &csum_not_calc, &rss_hash, &enet_bytes_written, + &packet_error, &vlan_stripped, &vlan, + &checksum, &fcoe_sof, &fcoe_fnic_crc_ok, + &fcoe_enc_error, &fcoe_eof, &tcp_udp_csum_ok, + &udp, &tcp, &ipv4_csum_ok, &ipv6, &ipv4, + &ipv4_fragment, &fcs_ok); + + ethhdr_stripped = 0; + bytes_written = enet_bytes_written; + if (!fcs_ok) { atomic64_inc(&fnic_stats->misc_stats.frame_errors); - FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, - "fcs error. dropping packet.\n"); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fnic 0x%p fcs error. Dropping packet.\n", fnic); goto drop; } - if (fnic_import_rq_eth_pkt(fnic, skb)) - return; + eh = (struct ethhdr *) fp; + if (eh->h_proto != cpu_to_be16(ETH_P_FCOE)) { + if (fnic_import_rq_eth_pkt(fnic, fp)) + return; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Dropping h_proto 0x%x", + be16_to_cpu(eh->h_proto)); + goto drop; + } } else { - /* wrong CQ type*/ - shost_printk(KERN_ERR, fnic->lport->host, - "fnic rq_cmpl wrong cq type x%x\n", type); + /* wrong CQ type */ + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fnic rq_cmpl wrong cq type x%x\n", type); goto drop; } - if (!fcs_ok || packet_error || !fcoe_fc_crc_ok || fcoe_enc_error) { + if (!fcs_ok || packet_error || !fcoe_fnic_crc_ok || fcoe_enc_error) { atomic64_inc(&fnic_stats->misc_stats.frame_errors); - FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, - "fnic rq_cmpl fcoe x%x fcsok x%x" - " pkterr x%x fcoe_fc_crc_ok x%x, fcoe_enc_err" - " x%x\n", - fcoe, fcs_ok, packet_error, - fcoe_fc_crc_ok, fcoe_enc_error); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fcoe %x fcsok %x pkterr %x ffco %x fee %x\n", + fcoe, fcs_ok, packet_error, + fcoe_fnic_crc_ok, fcoe_enc_error); goto drop; } spin_lock_irqsave(&fnic->fnic_lock, flags); if (fnic->stop_rx_link_events) { spin_unlock_irqrestore(&fnic->fnic_lock, flags); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fnic->stop_rx_link_events: %d\n", + fnic->stop_rx_link_events); goto drop; } - fr_dev(fp) = fnic->lport; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); - if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_RECV, - (char *)skb->data, skb->len)) != 0) { - printk(KERN_ERR "fnic ctlr frame trace error!!!"); + + frame_elem = mempool_alloc(fnic->frame_elem_pool, + GFP_ATOMIC | __GFP_ZERO); + if (!frame_elem) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Failed to allocate memory for frame elem"); + goto drop; } + frame_elem->fp = fp; + frame_elem->rx_ethhdr_stripped = ethhdr_stripped; + frame_elem->frame_len = bytes_written; - skb_queue_tail(&fnic->frame_queue, skb); - queue_work(fnic_event_queue, &fnic->frame_work); + spin_lock_irqsave(&fnic->fnic_lock, flags); + list_add_tail(&frame_elem->links, &fnic->frame_queue); + 
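The two completion flavours decoded above differ only in where the byte count comes from and whether the Ethernet header is still present; a hypothetical condensed decode for reference:

        struct rx_meta {
                bool ethhdr_stripped;   /* FCP RQ strips the Ethernet header */
                u32 bytes_written;
        };

        static void fnic_decode_rx_meta(u8 type, u32 fcp_len, u16 enet_len,
                                        struct rx_meta *m)
        {
                if (type == CQ_DESC_TYPE_RQ_FCP) {
                        m->ethhdr_stripped = true;
                        m->bytes_written = fcp_len;
                } else {                /* CQ_DESC_TYPE_RQ_ENET */
                        m->ethhdr_stripped = false;
                        m->bytes_written = enet_len;
                }
        }
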
spin_unlock_irqrestore(&fnic->fnic_lock, flags); + queue_work(fnic_event_queue, &fnic->frame_work); return; + drop: - dev_kfree_skb_irq(skb); + kfree(fp); } static int fnic_rq_cmpl_handler_cont(struct vnic_dev *vdev, @@ -945,10 +564,10 @@ int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do) cur_work_done = vnic_cq_service(&fnic->cq[i], rq_work_to_do, fnic_rq_cmpl_handler_cont, NULL); - if (cur_work_done) { + if (cur_work_done && fnic->stop_rx_link_events != 1) { err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame); if (err) - shost_printk(KERN_ERR, fnic->lport->host, + shost_printk(KERN_ERR, fnic->host, "fnic_alloc_rq_frame can't alloc" " frame\n"); } @@ -966,218 +585,179 @@ int fnic_rq_cmpl_handler(struct fnic *fnic, int rq_work_to_do) int fnic_alloc_rq_frame(struct vnic_rq *rq) { struct fnic *fnic = vnic_dev_priv(rq->vdev); - struct sk_buff *skb; + void *buf; u16 len; dma_addr_t pa; - int r; + int ret; - len = FC_FRAME_HEADROOM + FC_MAX_FRAME + FC_FRAME_TAILROOM; - skb = dev_alloc_skb(len); - if (!skb) { - FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, - "Unable to allocate RQ sk_buff\n"); + len = FNIC_FRAME_HT_ROOM; + buf = kmalloc(len, GFP_ATOMIC); + if (!buf) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Unable to allocate RQ buffer of size: %d\n", len); return -ENOMEM; } - skb_reset_mac_header(skb); - skb_reset_transport_header(skb); - skb_reset_network_header(skb); - skb_put(skb, len); - pa = dma_map_single(&fnic->pdev->dev, skb->data, len, DMA_FROM_DEVICE); + + pa = dma_map_single(&fnic->pdev->dev, buf, len, DMA_FROM_DEVICE); if (dma_mapping_error(&fnic->pdev->dev, pa)) { - r = -ENOMEM; - printk(KERN_ERR "PCI mapping failed with error %d\n", r); - goto free_skb; + ret = -ENOMEM; + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "PCI mapping failed with error %d\n", ret); + goto free_buf; } - fnic_queue_rq_desc(rq, skb, pa, len); + fnic_queue_rq_desc(rq, buf, pa, len); return 0; - -free_skb: - kfree_skb(skb); - return r; +free_buf: + kfree(buf); + return ret; } void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf) { - struct fc_frame *fp = buf->os_buf; + void *rq_buf = buf->os_buf; struct fnic *fnic = vnic_dev_priv(rq->vdev); dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len, DMA_FROM_DEVICE); - dev_kfree_skb(fp_skb(fp)); + kfree(rq_buf); buf->os_buf = NULL; } -/** - * fnic_eth_send() - Send Ethernet frame. - * @fip: fcoe_ctlr instance. - * @skb: Ethernet Frame, FIP, without VLAN encapsulation. 
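With sk_buffs gone, the RQ buffer handling in fnic_alloc_rq_frame()/fnic_free_rq_buf() above reduces to a plain kmalloc/DMA pattern; a sketch of the round trip as the two functions implement it:

        /*
         * alloc:    buf = kmalloc(FNIC_FRAME_HT_ROOM, GFP_ATOMIC);
         *           pa  = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
         *           fnic_queue_rq_desc(rq, buf, pa, len);
         *
         * complete: dma_unmap_single(dev, pa, len, DMA_FROM_DEVICE);
         *           the buffer is either handed to the frame worker,
         *           which kfree()s it after processing, or kfree()d
         *           immediately on a drop.
         */
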
- */ -void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb) -{ - struct fnic *fnic = fnic_from_ctlr(fip); - struct vnic_wq *wq = &fnic->wq[0]; - dma_addr_t pa; - struct ethhdr *eth_hdr; - struct vlan_ethhdr *vlan_hdr; - unsigned long flags; - - if (!fnic->vlan_hw_insert) { - eth_hdr = (struct ethhdr *)skb_mac_header(skb); - vlan_hdr = skb_push(skb, sizeof(*vlan_hdr) - sizeof(*eth_hdr)); - memcpy(vlan_hdr, eth_hdr, 2 * ETH_ALEN); - vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q); - vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto; - vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id); - if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, - FNIC_FC_SEND|0x80, (char *)eth_hdr, skb->len)) != 0) { - printk(KERN_ERR "fnic ctlr frame trace error!!!"); - } - } else { - if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, - FNIC_FC_SEND|0x80, (char *)skb->data, skb->len)) != 0) { - printk(KERN_ERR "fnic ctlr frame trace error!!!"); - } - } - - pa = dma_map_single(&fnic->pdev->dev, skb->data, skb->len, - DMA_TO_DEVICE); - if (dma_mapping_error(&fnic->pdev->dev, pa)) { - printk(KERN_ERR "DMA mapping failed\n"); - goto free_skb; - } - - spin_lock_irqsave(&fnic->wq_lock[0], flags); - if (!vnic_wq_desc_avail(wq)) - goto irq_restore; - - fnic_queue_wq_eth_desc(wq, skb, pa, skb->len, - 0 /* hw inserts cos value */, - fnic->vlan_id, 1); - spin_unlock_irqrestore(&fnic->wq_lock[0], flags); - return; - -irq_restore: - spin_unlock_irqrestore(&fnic->wq_lock[0], flags); - dma_unmap_single(&fnic->pdev->dev, pa, skb->len, DMA_TO_DEVICE); -free_skb: - kfree_skb(skb); -} - /* * Send FC frame. */ -static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp) +static int fnic_send_frame(struct fnic *fnic, void *frame, int frame_len) { struct vnic_wq *wq = &fnic->wq[0]; - struct sk_buff *skb; dma_addr_t pa; - struct ethhdr *eth_hdr; - struct vlan_ethhdr *vlan_hdr; - struct fcoe_hdr *fcoe_hdr; - struct fc_frame_header *fh; - u32 tot_len, eth_hdr_len; int ret = 0; unsigned long flags; - fh = fc_frame_header_get(fp); - skb = fp_skb(fp); + pa = dma_map_single(&fnic->pdev->dev, frame, frame_len, DMA_TO_DEVICE); - if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) && - fcoe_ctlr_els_send(&fnic->ctlr, fnic->lport, skb)) - return 0; - - if (!fnic->vlan_hw_insert) { - eth_hdr_len = sizeof(*vlan_hdr) + sizeof(*fcoe_hdr); - vlan_hdr = skb_push(skb, eth_hdr_len); - eth_hdr = (struct ethhdr *)vlan_hdr; - vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q); - vlan_hdr->h_vlan_encapsulated_proto = htons(ETH_P_FCOE); - vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id); - fcoe_hdr = (struct fcoe_hdr *)(vlan_hdr + 1); - } else { - eth_hdr_len = sizeof(*eth_hdr) + sizeof(*fcoe_hdr); - eth_hdr = skb_push(skb, eth_hdr_len); - eth_hdr->h_proto = htons(ETH_P_FCOE); - fcoe_hdr = (struct fcoe_hdr *)(eth_hdr + 1); - } - - if (fnic->ctlr.map_dest) - fc_fcoe_set_mac(eth_hdr->h_dest, fh->fh_d_id); - else - memcpy(eth_hdr->h_dest, fnic->ctlr.dest_addr, ETH_ALEN); - memcpy(eth_hdr->h_source, fnic->data_src_addr, ETH_ALEN); - - tot_len = skb->len; - BUG_ON(tot_len % 4); - - memset(fcoe_hdr, 0, sizeof(*fcoe_hdr)); - fcoe_hdr->fcoe_sof = fr_sof(fp); - if (FC_FCOE_VER) - FC_FCOE_ENCAPS_VER(fcoe_hdr, FC_FCOE_VER); - - pa = dma_map_single(&fnic->pdev->dev, eth_hdr, tot_len, DMA_TO_DEVICE); - if (dma_mapping_error(&fnic->pdev->dev, pa)) { - ret = -ENOMEM; - printk(KERN_ERR "DMA map failed with error %d\n", ret); - goto free_skb_on_err; - } - - if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_SEND, - (char *)eth_hdr, tot_len)) != 0) { - 
printk(KERN_ERR "fnic ctlr frame trace error!!!"); + if ((fnic_fc_trace_set_data(fnic->fnic_num, + FNIC_FC_SEND | 0x80, (char *) frame, + frame_len)) != 0) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "fnic ctlr frame trace error"); } spin_lock_irqsave(&fnic->wq_lock[0], flags); if (!vnic_wq_desc_avail(wq)) { - dma_unmap_single(&fnic->pdev->dev, pa, tot_len, DMA_TO_DEVICE); + dma_unmap_single(&fnic->pdev->dev, pa, frame_len, DMA_TO_DEVICE); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "vnic work queue descriptor is not available"); ret = -1; - goto irq_restore; + goto fnic_send_frame_end; } - fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp), - 0 /* hw inserts cos value */, - fnic->vlan_id, 1, 1, 1); + /* hw inserts cos value */ + fnic_queue_wq_desc(wq, frame, pa, frame_len, FC_EOF_T, + 0, fnic->vlan_id, 1, 1, 1); -irq_restore: +fnic_send_frame_end: spin_unlock_irqrestore(&fnic->wq_lock[0], flags); - -free_skb_on_err: - if (ret) - dev_kfree_skb_any(fp_skb(fp)); - return ret; } -/* - * fnic_send - * Routine to send a raw frame +/** + * fdls_send_fcoe_frame - send a filled-in FC frame, filling in eth and FCoE + * info. This interface is used only in the non fast path. (login, fabric + * registrations etc.) + * + * @fnic: fnic instance + * @frame: frame structure with FC payload filled in + * @frame_size: length of the frame to be sent + * @srcmac: source mac address + * @dstmac: destination mac address + * + * Called with the fnic lock held. */ -int fnic_send(struct fc_lport *lp, struct fc_frame *fp) +static int +fdls_send_fcoe_frame(struct fnic *fnic, void *frame, int frame_size, + uint8_t *srcmac, uint8_t *dstmac) { - struct fnic *fnic = lport_priv(lp); - unsigned long flags; + struct ethhdr *pethhdr; + struct fcoe_hdr *pfcoe_hdr; + struct fnic_frame_list *frame_elem; + int len = frame_size; + int ret; + struct fc_frame_header *fchdr = (struct fc_frame_header *) (frame + + FNIC_ETH_FCOE_HDRS_OFFSET); - if (fnic->in_remove) { - dev_kfree_skb(fp_skb(fp)); - return -1; - } + pethhdr = (struct ethhdr *) frame; + pethhdr->h_proto = cpu_to_be16(ETH_P_FCOE); + memcpy(pethhdr->h_source, srcmac, ETH_ALEN); + memcpy(pethhdr->h_dest, dstmac, ETH_ALEN); + + pfcoe_hdr = (struct fcoe_hdr *) (frame + sizeof(struct ethhdr)); + pfcoe_hdr->fcoe_sof = FC_SOF_I3; /* * Queue frame if in a transitional state. * This occurs while registering the Port_ID / MAC address after FLOGI. 
*/ - spin_lock_irqsave(&fnic->fnic_lock, flags); - if (fnic->state != FNIC_IN_FC_MODE && fnic->state != FNIC_IN_ETH_MODE) { - skb_queue_tail(&fnic->tx_queue, fp_skb(fp)); - spin_unlock_irqrestore(&fnic->fnic_lock, flags); + if ((fnic->state != FNIC_IN_FC_MODE) + && (fnic->state != FNIC_IN_ETH_MODE)) { + frame_elem = mempool_alloc(fnic->frame_elem_pool, + GFP_ATOMIC | __GFP_ZERO); + if (!frame_elem) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Failed to allocate memory for frame elem"); + return -ENOMEM; + } + + FNIC_FCS_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "Queueing FC frame: sid/did/type/oxid = 0x%x/0x%x/0x%x/0x%x\n", + ntoh24(fchdr->fh_s_id), ntoh24(fchdr->fh_d_id), + fchdr->fh_type, FNIC_STD_GET_OX_ID(fchdr)); + frame_elem->fp = frame; + frame_elem->frame_len = len; + list_add_tail(&frame_elem->links, &fnic->tx_queue); return 0; } - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - return fnic_send_frame(fnic, fp); + fnic_debug_dump_fc_frame(fnic, fchdr, frame_size, "Outgoing"); + + ret = fnic_send_frame(fnic, frame, len); + return ret; +} + +void fnic_send_fcoe_frame(struct fnic_iport_s *iport, void *frame, + int frame_size) +{ + struct fnic *fnic = iport->fnic; + uint8_t *dstmac, *srcmac; + + /* If module unload is in-progress, don't send */ + if (fnic->in_remove) + return; + + if (iport->fabric.flags & FNIC_FDLS_FPMA_LEARNT) { + srcmac = iport->fpma; + dstmac = iport->fcfmac; + } else { + srcmac = iport->hwmac; + dstmac = FCOE_ALL_FCF_MAC; + } + + fdls_send_fcoe_frame(fnic, frame, frame_size, srcmac, dstmac); +} + +int +fnic_send_fip_frame(struct fnic_iport_s *iport, void *frame, + int frame_size) +{ + struct fnic *fnic = iport->fnic; + + if (fnic->in_remove) + return -1; + + fnic_debug_dump_fip_frame(fnic, frame, frame_size, "Outgoing"); + return fnic_send_frame(fnic, frame, frame_size); } /** @@ -1193,64 +773,87 @@ int fnic_send(struct fc_lport *lp, struct fc_frame *fp) void fnic_flush_tx(struct work_struct *work) { struct fnic *fnic = container_of(work, struct fnic, flush_work); - struct sk_buff *skb; struct fc_frame *fp; + struct fnic_frame_list *cur_frame, *next; - while ((skb = skb_dequeue(&fnic->tx_queue))) { - fp = (struct fc_frame *)skb; - fnic_send_frame(fnic, fp); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Flush queued frames"); + + list_for_each_entry_safe(cur_frame, next, &fnic->tx_queue, links) { + fp = cur_frame->fp; + list_del(&cur_frame->links); + fnic_send_frame(fnic, fp, cur_frame->frame_len); + mempool_free(cur_frame, fnic->frame_elem_pool); } } -/** - * fnic_set_eth_mode() - put fnic into ethernet mode. - * @fnic: fnic device - * - * Called without fnic lock held. 
- */ -static void fnic_set_eth_mode(struct fnic *fnic) +int +fnic_fdls_register_portid(struct fnic_iport_s *iport, u32 port_id, + void *fp) { - unsigned long flags; - enum fnic_state old_state; + struct fnic *fnic = iport->fnic; + struct ethhdr *ethhdr; int ret; - spin_lock_irqsave(&fnic->fnic_lock, flags); -again: - old_state = fnic->state; - switch (old_state) { - case FNIC_IN_FC_MODE: - case FNIC_IN_ETH_TRANS_FC_MODE: - default: - fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; - spin_unlock_irqrestore(&fnic->fnic_lock, flags); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Setting port id: 0x%x fp: 0x%p fnic state: %d", port_id, + fp, fnic->state); - ret = fnic_fw_reset_handler(fnic); + if (fp) { + ethhdr = (struct ethhdr *) fp; + vnic_dev_add_addr(fnic->vdev, ethhdr->h_dest); + } - spin_lock_irqsave(&fnic->fnic_lock, flags); - if (fnic->state != FNIC_IN_FC_TRANS_ETH_MODE) - goto again; - if (ret) - fnic->state = old_state; - break; - - case FNIC_IN_FC_TRANS_ETH_MODE: - case FNIC_IN_ETH_MODE: - break; + /* Change state to reflect transition to FC mode */ + if (fnic->state == FNIC_IN_ETH_MODE || fnic->state == FNIC_IN_FC_MODE) + fnic->state = FNIC_IN_ETH_TRANS_FC_MODE; + else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Unexpected fnic state while processing FLOGI response\n"); + return -1; + } + + /* + * Send FLOGI registration to firmware to set up FC mode. + * The new address will be set up when registration completes. + */ + ret = fnic_flogi_reg_handler(fnic, port_id); + if (ret < 0) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FLOGI registration error ret: %d fnic state: %d\n", + ret, fnic->state); + if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) + fnic->state = FNIC_IN_ETH_MODE; + + return -1; + } + iport->fabric.flags |= FNIC_FDLS_FPMA_LEARNT; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FLOGI registration success\n"); + return 0; +} + +void fnic_free_txq(struct list_head *head) +{ + struct fnic_frame_list *cur_frame, *next; + + list_for_each_entry_safe(cur_frame, next, head, links) { + list_del(&cur_frame->links); + kfree(cur_frame->fp); + kfree(cur_frame); } - spin_unlock_irqrestore(&fnic->fnic_lock, flags); } static void fnic_wq_complete_frame_send(struct vnic_wq *wq, struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque) { - struct sk_buff *skb = buf->os_buf; - struct fc_frame *fp = (struct fc_frame *)skb; struct fnic *fnic = vnic_dev_priv(wq->vdev); dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len, DMA_TO_DEVICE); - dev_kfree_skb_irq(fp_skb(fp)); + mempool_free(buf->os_buf, fnic->frame_pool); buf->os_buf = NULL; } @@ -1288,119 +891,218 @@ int fnic_wq_cmpl_handler(struct fnic *fnic, int work_to_do) void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf) { - struct fc_frame *fp = buf->os_buf; struct fnic *fnic = vnic_dev_priv(wq->vdev); dma_unmap_single(&fnic->pdev->dev, buf->dma_addr, buf->len, DMA_TO_DEVICE); - dev_kfree_skb(fp_skb(fp)); + kfree(buf->os_buf); buf->os_buf = NULL; } -void fnic_fcoe_reset_vlans(struct fnic *fnic) +void +fnic_fdls_add_tport(struct fnic_iport_s *iport, struct fnic_tport_s *tport, + unsigned long flags) +{ + struct fnic *fnic = iport->fnic; + struct fc_rport *rport; + struct fc_rport_identifiers ids; + struct rport_dd_data_s *rdd_data; + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Adding rport fcid: 0x%x", tport->fcid); + + ids.node_name = tport->wwnn; + ids.port_name = tport->wwpn; + ids.port_id = tport->fcid; + ids.roles = FC_RPORT_ROLE_FCP_TARGET; + + 
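        /*
         * fc_remote_port_add() can sleep, so the fnic_lock is
         * dropped across the FC transport call below and retaken
         * before the tport is linked to the returned rport.
         */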
spin_unlock_irqrestore(&fnic->fnic_lock, flags); + rport = fc_remote_port_add(fnic->host, 0, &ids); + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (!rport) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Failed to add rport for tport: 0x%x", tport->fcid); + return; + } + + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Added rport fcid: 0x%x", tport->fcid); + + /* Mimic these assignments in queuecommand to avoid timing issues */ + rport->maxframe_size = FNIC_FC_MAX_PAYLOAD_LEN; + rport->supported_classes = FC_COS_CLASS3 | FC_RPORT_ROLE_FCP_TARGET; + rdd_data = rport->dd_data; + rdd_data->tport = tport; + rdd_data->iport = iport; + tport->rport = rport; + tport->flags |= FNIC_FDLS_SCSI_REGISTERED; +} + +void +fnic_fdls_remove_tport(struct fnic_iport_s *iport, + struct fnic_tport_s *tport, unsigned long flags) +{ + struct fnic *fnic = iport->fnic; + struct rport_dd_data_s *rdd_data; + + struct fc_rport *rport; + + if (!tport) + return; + + fdls_set_tport_state(tport, FDLS_TGT_STATE_OFFLINE); + rport = tport->rport; + + if (rport) { + /* tport resource release will be done + * after fnic_terminate_rport_io() + */ + tport->flags |= FNIC_FDLS_TPORT_DELETED; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + /* Interface to scsi_fc_transport */ + fc_remote_port_delete(rport); + + spin_lock_irqsave(&fnic->fnic_lock, flags); + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Deregistered and freed tport fcid: 0x%x from scsi transport fc", + tport->fcid); + + /* + * the dd_data is allocated by fc transport + * of size dd_fcrport_size + */ + rdd_data = rport->dd_data; + rdd_data->tport = NULL; + rdd_data->iport = NULL; + list_del(&tport->links); + kfree(tport); + } else { + fnic_del_tport_timer_sync(fnic, tport); + list_del(&tport->links); + kfree(tport); + } +} + +void fnic_delete_fcp_tports(struct fnic *fnic) { + struct fnic_tport_s *tport, *next; unsigned long flags; - struct fcoe_vlan *vlan; - struct fcoe_vlan *next; - /* - * indicate a link down to fcoe so that all fcf's are free'd - * might not be required since we did this before sending vlan - * discovery request - */ - spin_lock_irqsave(&fnic->vlans_lock, flags); - if (!list_empty(&fnic->vlans)) { - list_for_each_entry_safe(vlan, next, &fnic->vlans, list) { - list_del(&vlan->list); - kfree(vlan); - } + spin_lock_irqsave(&fnic->fnic_lock, flags); + list_for_each_entry_safe(tport, next, &fnic->iport.tport_list, links) { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "removing fcp rport fcid: 0x%x", tport->fcid); + fdls_set_tport_state(tport, FDLS_TGT_STATE_OFFLINING); + fnic_del_tport_timer_sync(fnic, tport); + fnic_fdls_remove_tport(&fnic->iport, tport, flags); } - spin_unlock_irqrestore(&fnic->vlans_lock, flags); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); } -void fnic_handle_fip_timer(struct fnic *fnic) +/** + * fnic_tport_event_handler() - Handler for remote port events + * in the tport_event_queue. 
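Events consumed by the handler below are produced elsewhere in the driver; a minimal sketch of the producer side under assumed names (fnic_tport_post_event() is hypothetical; the list, work item, and workqueue names match those used in this file):

        /* hypothetical producer; caller is assumed to hold fnic_lock */
        static void fnic_tport_post_event(struct fnic *fnic,
                                          struct fnic_tport_s *tport,
                                          enum fnic_tgt_msg_id ev)
        {
                struct fnic_tport_event_s *evt;

                evt = kzalloc(sizeof(*evt), GFP_ATOMIC);
                if (!evt)
                        return;
                evt->event = ev;
                evt->arg1 = tport;
                list_add_tail(&evt->links, &fnic->tport_event_list);
                queue_work(fnic_event_queue, &fnic->tport_work);
        }
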
+ * + * @work: Handle to the remote port being dequeued + */ +void fnic_tport_event_handler(struct work_struct *work) { + struct fnic *fnic = container_of(work, struct fnic, tport_work); + struct fnic_tport_event_s *cur_evt, *next; unsigned long flags; - struct fcoe_vlan *vlan; - struct fnic_stats *fnic_stats = &fnic->fnic_stats; - u64 sol_time; + struct fnic_tport_s *tport; spin_lock_irqsave(&fnic->fnic_lock, flags); - if (fnic->stop_rx_link_events) { - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - return; + list_for_each_entry_safe(cur_evt, next, &fnic->tport_event_list, links) { + tport = cur_evt->arg1; + switch (cur_evt->event) { + case TGT_EV_RPORT_ADD: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Add rport event"); + if (tport->state == FDLS_TGT_STATE_READY) { + fnic_fdls_add_tport(&fnic->iport, + (struct fnic_tport_s *) cur_evt->arg1, flags); + } else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Target not ready. Add rport event dropped: 0x%x", + tport->fcid); + } + break; + case TGT_EV_RPORT_DEL: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Remove rport event"); + if (tport->state == FDLS_TGT_STATE_OFFLINING) { + fnic_fdls_remove_tport(&fnic->iport, + (struct fnic_tport_s *) cur_evt->arg1, flags); + } else { + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "remove rport event dropped tport fcid: 0x%x", + tport->fcid); + } + break; + case TGT_EV_TPORT_DELETE: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Delete tport event"); + fdls_delete_tport(tport->iport, tport); + break; + default: + FNIC_FCS_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Unknown tport event"); + break; + } + list_del(&cur_evt->links); + kfree(cur_evt); } spin_unlock_irqrestore(&fnic->fnic_lock, flags); +} - if (fnic->ctlr.mode == FIP_MODE_NON_FIP) - return; +void fnic_flush_tport_event_list(struct fnic *fnic) +{ + struct fnic_tport_event_s *cur_evt, *next; + unsigned long flags; - spin_lock_irqsave(&fnic->vlans_lock, flags); - if (list_empty(&fnic->vlans)) { - spin_unlock_irqrestore(&fnic->vlans_lock, flags); - /* no vlans available, try again */ - if (unlikely(fnic_log_level & FNIC_FCS_LOGGING)) - if (printk_ratelimit()) - shost_printk(KERN_DEBUG, fnic->lport->host, - "Start VLAN Discovery\n"); - fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); - return; + spin_lock_irqsave(&fnic->fnic_lock, flags); + list_for_each_entry_safe(cur_evt, next, &fnic->tport_event_list, links) { + list_del(&cur_evt->links); + kfree(cur_evt); } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); +} - vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); - FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, - "fip_timer: vlan %d state %d sol_count %d\n", - vlan->vid, vlan->state, vlan->sol_count); - switch (vlan->state) { - case FIP_VLAN_USED: - FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, - "FIP VLAN is selected for FC transaction\n"); - spin_unlock_irqrestore(&fnic->vlans_lock, flags); - break; - case FIP_VLAN_FAILED: - spin_unlock_irqrestore(&fnic->vlans_lock, flags); - /* if all vlans are in failed state, restart vlan disc */ - if (unlikely(fnic_log_level & FNIC_FCS_LOGGING)) - if (printk_ratelimit()) - shost_printk(KERN_DEBUG, fnic->lport->host, - "Start VLAN Discovery\n"); - fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); - break; - case FIP_VLAN_SENT: - if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) { - /* - * no response on this vlan, remove from the list. 
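For the PC-RSCN path, host resets are serialized through a module-global list that the single reset worker further below drains; a sketch of the producer side (fnic_queue_host_reset() is hypothetical, while the list and lock names are those the worker uses):

        static void fnic_queue_host_reset(struct fnic *fnic)
        {
                unsigned long flags;

                spin_lock_irqsave(&reset_fnic_list_lock, flags);
                list_add_tail(&fnic->links, &reset_fnic_list);
                spin_unlock_irqrestore(&reset_fnic_list_lock, flags);
                /* the reset work item would be scheduled here */
        }
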
- * Try the next vlan - */ - FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, - "Dequeue this VLAN ID %d from list\n", - vlan->vid); - list_del(&vlan->list); - kfree(vlan); - vlan = NULL; - if (list_empty(&fnic->vlans)) { - /* we exhausted all vlans, restart vlan disc */ - spin_unlock_irqrestore(&fnic->vlans_lock, - flags); - FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, - "fip_timer: vlan list empty, " - "trigger vlan disc\n"); - fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); - return; - } - /* check the next vlan */ - vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, - list); - fnic->set_vlan(fnic, vlan->vid); - vlan->state = FIP_VLAN_SENT; /* sent now */ - } - spin_unlock_irqrestore(&fnic->vlans_lock, flags); - atomic64_inc(&fnic_stats->vlan_stats.sol_expiry_count); - vlan->sol_count++; - sol_time = jiffies + msecs_to_jiffies - (FCOE_CTLR_START_DELAY); - mod_timer(&fnic->fip_timer, round_jiffies(sol_time)); - break; +void fnic_reset_work_handler(struct work_struct *work) +{ + struct fnic *cur_fnic, *next_fnic; + unsigned long reset_fnic_list_lock_flags; + int host_reset_ret_code; + + /* + * This is a single thread. It is per fnic module, not per fnic + * All the fnics that need to be reset + * have been serialized via the reset fnic list. + */ + spin_lock_irqsave(&reset_fnic_list_lock, reset_fnic_list_lock_flags); + list_for_each_entry_safe(cur_fnic, next_fnic, &reset_fnic_list, links) { + list_del(&cur_fnic->links); + spin_unlock_irqrestore(&reset_fnic_list_lock, + reset_fnic_list_lock_flags); + + dev_err(&cur_fnic->pdev->dev, "fnic: <%d>: issuing a host reset\n", + cur_fnic->fnic_num); + host_reset_ret_code = fnic_host_reset(cur_fnic->host); + dev_err(&cur_fnic->pdev->dev, + "fnic: <%d>: returned from host reset with status: %d\n", + cur_fnic->fnic_num, host_reset_ret_code); + + spin_lock_irqsave(&cur_fnic->fnic_lock, cur_fnic->lock_flags); + cur_fnic->pc_rscn_handling_status = + PC_RSCN_HANDLING_NOT_IN_PROGRESS; + spin_unlock_irqrestore(&cur_fnic->fnic_lock, cur_fnic->lock_flags); + + spin_lock_irqsave(&reset_fnic_list_lock, + reset_fnic_list_lock_flags); } + spin_unlock_irqrestore(&reset_fnic_list_lock, + reset_fnic_list_lock_flags); } diff --git a/drivers/scsi/fnic/fnic_fdls.h b/drivers/scsi/fnic/fnic_fdls.h new file mode 100644 index 000000000000..8e610b65ad57 --- /dev/null +++ b/drivers/scsi/fnic/fnic_fdls.h @@ -0,0 +1,434 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + */ + +#ifndef _FNIC_FDLS_H_ +#define _FNIC_FDLS_H_ + +#include "fnic_stats.h" +#include "fdls_fc.h" + +/* FDLS - Fabric discovery and login services + * -> VLAN discovery + * -> retry every retry delay seconds until it succeeds. 
+ * <- List of VLANs + * + * -> Solicitation + * <- Solicitation response (Advertisement) + * + * -> FCF selection & FLOGI (FLOGI timeout - 2 * E_D_TOV) + * <- FLOGI response + * + * -> FCF keep alive + * <- FCF keep alive + * + * -> PLOGI to FFFFFC (DNS) (PLOGI timeout - 2 * R_A_TOV) + * -> ABTS if timeout (ABTS timeout - 2 * R_A_TOV) + * <- PLOGI response + * -> Retry PLOGI to FFFFFC (DNS) - Number of retries from vnic.cfg + * + * -> SCR to FFFFFC (DNS) (SCR timeout - 2 * R_A_TOV) + * -> ABTS if timeout (ABTS timeout - 2 * R_A_TOV) + * <- SCR response + * -> Retry SCR - Number of retries 2 + * + * -> GPN_FT to FFFFFC (GPN_FT timeout - 2 * R_A_TOV) + * -> Retry on BUSY until it succeeds + * -> 2 retries on timeout + * + * -> RFT_ID to FFFFFC (DNS) (RFT_ID timeout - 3 * R_A_TOV) + * -> ABTS if timeout (ABTS timeout - 2 * R_A_TOV) + * -> Retry RFT_ID to FFFFFC (DNS) (Number of retries 2) + * -> Ignore if both retries fail. + * + * Session establishment with targets + * For each PWWN + * -> PLOGI to FCID of that PWWN (PLOGI timeout 2 * R_A_TOV) + * -> ABTS if timeout (ABTS timeout - 2 * R_A_TOV) + * <- PLOGI response + * -> Retry PLOGI. Num retries using vnic.cfg + * + * -> PRLI to FCID of that PWWN (PRLI timeout 2 * R_A_TOV) + * -> ABTS if timeout (ABTS timeout - 2 * R_A_TOV) + * <- PRLI response + * -> Retry PRLI. Num retries using vnic.cfg + * + */ + +#define FDLS_RETRY_COUNT 2 + +/* + * OXID encoding: + * bits 0-8: oxid idx - allocated from pool + * bits 9-13: oxid frame code from fnic_oxid_frame_type_e + * bits 14-15: all zeros + */ +#define FNIC_OXID_POOL_SZ (512) /* always power of 2 */ +#define FNIC_OXID_ENCODE(idx, frame_type) (frame_type | idx) +#define FNIC_FRAME_MASK 0xFE00 +#define FNIC_FRAME_TYPE(oxid) (oxid & FNIC_FRAME_MASK) +#define FNIC_OXID_IDX(oxid) ((oxid) & (FNIC_OXID_POOL_SZ - 1)) + +#define OXID_RECLAIM_TOV(iport) (2 * iport->r_a_tov) /* in milliseconds */ + +#define FNIC_FDLS_FABRIC_ABORT_ISSUED 0x1 +#define FNIC_FDLS_FPMA_LEARNT 0x2 + +/* tport flags */ +#define FNIC_FDLS_TPORT_IN_GPN_FT_LIST 0x1 +#define FNIC_FDLS_TGT_ABORT_ISSUED 0x2 +#define FNIC_FDLS_TPORT_SEND_ADISC 0x4 +#define FNIC_FDLS_RETRY_FRAME 0x8 +#define FNIC_FDLS_TPORT_BUSY 0x10 +#define FNIC_FDLS_TPORT_TERMINATING 0x20 +#define FNIC_FDLS_TPORT_DELETED 0x40 +#define FNIC_FDLS_SCSI_REGISTERED 0x200 + +/* Retry supported by rport (returned by PRLI service parameters) */ +#define FDLS_FC_RP_FLAGS_RETRY 0x1 + +#define fdls_set_state(_fdls_fabric, _state) ((_fdls_fabric)->state = _state) +#define fdls_get_state(_fdls_fabric) ((_fdls_fabric)->state) + +#define FNIC_FDMI_ACTIVE 0x8 +#define FNIC_FIRST_LINK_UP 0x2 + +#define fdls_set_tport_state(_tport, _state) (_tport->state = _state) +#define fdls_get_tport_state(_tport) (_tport->state) + +#define FNIC_PORTSPEED_10GBIT 1 +#define FNIC_FRAME_HT_ROOM (2148) +#define FNIC_FCOE_FRAME_MAXSZ (2112) + + +#define FNIC_FRAME_TYPE_FABRIC_FLOGI 0x1000 +#define FNIC_FRAME_TYPE_FABRIC_PLOGI 0x1200 +#define FNIC_FRAME_TYPE_FABRIC_RPN 0x1400 +#define FNIC_FRAME_TYPE_FABRIC_RFT 0x1600 +#define FNIC_FRAME_TYPE_FABRIC_RFF 0x1800 +#define FNIC_FRAME_TYPE_FABRIC_SCR 0x1A00 +#define FNIC_FRAME_TYPE_FABRIC_GPN_FT 0x1C00 +#define FNIC_FRAME_TYPE_FABRIC_LOGO 0x1E00 +#define FNIC_FRAME_TYPE_FDMI_PLOGI 0x2000 +#define FNIC_FRAME_TYPE_FDMI_RHBA 0x2200 +#define FNIC_FRAME_TYPE_FDMI_RPA 0x2400 +#define FNIC_FRAME_TYPE_TGT_PLOGI 0x2600 +#define FNIC_FRAME_TYPE_TGT_PRLI 0x2800 +#define FNIC_FRAME_TYPE_TGT_ADISC 0x2A00 +#define 
FNIC_FRAME_TYPE_TGT_LOGO 0x2C00 + +struct fnic_fip_fcf_s { + uint16_t vlan_id; + uint8_t fcf_mac[6]; + uint8_t fcf_priority; + uint32_t fka_adv_period; + uint8_t ka_disabled; +}; + +enum fnic_fdls_state_e { + FDLS_STATE_INIT = 0, + FDLS_STATE_LINKDOWN, + FDLS_STATE_FABRIC_LOGO, + FDLS_STATE_FLOGO_DONE, + FDLS_STATE_FABRIC_FLOGI, + FDLS_STATE_FABRIC_PLOGI, + FDLS_STATE_RPN_ID, + FDLS_STATE_REGISTER_FC4_TYPES, + FDLS_STATE_REGISTER_FC4_FEATURES, + FDLS_STATE_SCR, + FDLS_STATE_GPN_FT, + FDLS_STATE_TGT_DISCOVERY, + FDLS_STATE_RSCN_GPN_FT, + FDLS_STATE_SEND_GPNFT +}; + +struct fnic_fdls_fabric_s { + enum fnic_fdls_state_e state; + uint32_t flags; + struct list_head tport_list; /* List of discovered tports */ + struct timer_list retry_timer; + int del_timer_inprogress; + int del_fdmi_timer_inprogress; + int retry_counter; + int timer_pending; + int fdmi_retry; + struct timer_list fdmi_timer; + int fdmi_pending; +}; + +struct fnic_fdls_fip_s { + uint32_t state; + uint32_t flogi_retry; +}; + +/* Message to tport_event_handler */ +enum fnic_tgt_msg_id { + TGT_EV_NONE = 0, + TGT_EV_RPORT_ADD, + TGT_EV_RPORT_DEL, + TGT_EV_TPORT_DELETE, + TGT_EV_REMOVE +}; + +struct fnic_tport_event_s { + struct list_head links; + enum fnic_tgt_msg_id event; + void *arg1; +}; + +enum fdls_tgt_state_e { + FDLS_TGT_STATE_INIT = 0, + FDLS_TGT_STATE_PLOGI, + FDLS_TGT_STATE_PRLI, + FDLS_TGT_STATE_READY, + FDLS_TGT_STATE_LOGO_RECEIVED, + FDLS_TGT_STATE_ADISC, + FDL_TGT_STATE_PLOGO, + FDLS_TGT_STATE_OFFLINING, + FDLS_TGT_STATE_OFFLINE +}; + +struct fnic_tport_s { + struct list_head links; /* To link the tports */ + enum fdls_tgt_state_e state; + uint32_t flags; + uint32_t fcid; + uint64_t wwpn; + uint64_t wwnn; + uint16_t active_oxid; + uint16_t tgt_flags; + atomic_t in_flight; /* io counter */ + uint16_t max_payload_size; + uint16_t r_a_tov; + uint16_t e_d_tov; + uint16_t lun0_delay; + int max_concur_seqs; + uint32_t fcp_csp; + struct timer_list retry_timer; + int del_timer_inprogress; + int retry_counter; + int timer_pending; + unsigned int num_pending_cmds; + int nexus_restart_count; + int exch_reset_in_progress; + void *iport; + struct work_struct tport_del_work; + struct completion *tport_del_done; + struct fc_rport *rport; + char str_wwpn[20]; + char str_wwnn[20]; +}; + +/* OXID pool related structures */ +struct reclaim_entry_s { + struct list_head links; + /* oxid that needs to be freed after 2*r_a_tov */ + uint16_t oxid_idx; + /* in jiffies. Use this to waiting time */ + unsigned long expires; + unsigned long *bitmap; +}; + +/* used for allocating oxids for fabric and fdmi requests */ +struct fnic_oxid_pool_s { + DECLARE_BITMAP(bitmap, FNIC_OXID_POOL_SZ); + int sz; /* size of the pool or block */ + int next_idx; /* used for cycling through the oxid pool */ + + /* retry schedule free */ + DECLARE_BITMAP(pending_schedule_free, FNIC_OXID_POOL_SZ); + struct delayed_work schedule_oxid_free_retry; + + /* List of oxids that need to be freed and reclaimed. 
+ * This list is shared by all the oxid pools + */ + struct list_head oxid_reclaim_list; + /* Work associated with reclaim list */ + struct delayed_work oxid_reclaim_work; +}; + +/* iport */ +enum fnic_iport_state_e { + FNIC_IPORT_STATE_INIT = 0, + FNIC_IPORT_STATE_LINK_WAIT, + FNIC_IPORT_STATE_FIP, + FNIC_IPORT_STATE_FABRIC_DISC, + FNIC_IPORT_STATE_READY +}; + +struct fnic_iport_s { + enum fnic_iport_state_e state; + struct fnic *fnic; + uint64_t boot_time; + uint32_t flags; + int usefip; + uint8_t hwmac[6]; /* HW MAC Addr */ + uint8_t fpma[6]; /* Fabric Provided MA */ + uint8_t fcfmac[6]; /* MAC addr of Fabric */ + uint16_t vlan_id; + uint32_t fcid; + + /* oxid pool */ + struct fnic_oxid_pool_s oxid_pool; + + /* + * fabric reqs are serialized and only one req at a time. + * Tracking the oxid for sending abort + */ + uint16_t active_oxid_fabric_req; + /* fdmi only */ + uint16_t active_oxid_fdmi_plogi; + uint16_t active_oxid_fdmi_rhba; + uint16_t active_oxid_fdmi_rpa; + + struct fnic_fip_fcf_s selected_fcf; + struct fnic_fdls_fip_s fip; + struct fnic_fdls_fabric_s fabric; + struct list_head tport_list; + struct list_head tport_list_pending_del; + /* list of tports for which we are yet to send PLOGO */ + struct list_head inprocess_tport_list; + struct list_head deleted_tport_list; + struct work_struct tport_event_work; + uint32_t e_d_tov; /* msec */ + uint32_t r_a_tov; /* msec */ + uint32_t link_supported_speeds; + uint32_t max_flogi_retries; + uint32_t max_plogi_retries; + uint32_t plogi_timeout; + uint32_t service_params; + uint64_t wwpn; + uint64_t wwnn; + uint16_t max_payload_size; + spinlock_t deleted_tport_lst_lock; + struct completion *flogi_reg_done; + struct fnic_iport_stats iport_stats; + char str_wwpn[20]; + char str_wwnn[20]; +}; + +struct rport_dd_data_s { + struct fnic_tport_s *tport; + struct fnic_iport_s *iport; +}; + +enum fnic_recv_frame_type_e { + FNIC_FABRIC_FLOGI_RSP = 1, + FNIC_FABRIC_PLOGI_RSP, + FNIC_FABRIC_RPN_RSP, + FNIC_FABRIC_RFT_RSP, + FNIC_FABRIC_RFF_RSP, + FNIC_FABRIC_SCR_RSP, + FNIC_FABRIC_GPN_FT_RSP, + FNIC_FABRIC_BLS_ABTS_RSP, + FNIC_FDMI_PLOGI_RSP, + FNIC_FDMI_REG_HBA_RSP, + FNIC_FDMI_RPA_RSP, + FNIC_FDMI_BLS_ABTS_RSP, + FNIC_FABRIC_LOGO_RSP, + + /* responses to target requests */ + FNIC_TPORT_PLOGI_RSP, + FNIC_TPORT_PRLI_RSP, + FNIC_TPORT_ADISC_RSP, + FNIC_TPORT_BLS_ABTS_RSP, + FNIC_TPORT_LOGO_RSP, + + /* unsolicited requests */ + FNIC_BLS_ABTS_REQ, + FNIC_ELS_PLOGI_REQ, + FNIC_ELS_RSCN_REQ, + FNIC_ELS_LOGO_REQ, + FNIC_ELS_ECHO_REQ, + FNIC_ELS_ADISC, + FNIC_ELS_RLS, + FNIC_ELS_RRQ, + FNIC_ELS_UNSUPPORTED_REQ, +}; + +enum fnic_port_speeds { + DCEM_PORTSPEED_NONE = 0, + DCEM_PORTSPEED_1G = 1000, + DCEM_PORTSPEED_2G = 2000, + DCEM_PORTSPEED_4G = 4000, + DCEM_PORTSPEED_8G = 8000, + DCEM_PORTSPEED_10G = 10000, + DCEM_PORTSPEED_16G = 16000, + DCEM_PORTSPEED_20G = 20000, + DCEM_PORTSPEED_25G = 25000, + DCEM_PORTSPEED_32G = 32000, + DCEM_PORTSPEED_40G = 40000, + DCEM_PORTSPEED_4x10G = 41000, + DCEM_PORTSPEED_50G = 50000, + DCEM_PORTSPEED_64G = 64000, + DCEM_PORTSPEED_100G = 100000, + DCEM_PORTSPEED_128G = 128000, +}; + +/* Function Declarations */ +/* fdls_disc.c */ +void fnic_fdls_disc_init(struct fnic_iport_s *iport); +void fnic_fdls_disc_start(struct fnic_iport_s *iport); +void fnic_fdls_recv_frame(struct fnic_iport_s *iport, void *rx_frame, + int len, int fchdr_offset); +void fnic_fdls_link_down(struct fnic_iport_s *iport); +int fdls_init_frame_pool(struct fnic_iport_s *iport); +uint8_t *fdls_alloc_frame(struct fnic_iport_s *iport); +uint16_t 
fdls_alloc_oxid(struct fnic_iport_s *iport, int oxid_frame_type, + uint16_t *active_oxid); +void fdls_free_oxid(struct fnic_iport_s *iport, + uint16_t oxid, uint16_t *active_oxid); +void fdls_tgt_logout(struct fnic_iport_s *iport, + struct fnic_tport_s *tport); +void fnic_del_fabric_timer_sync(struct fnic *fnic); +void fnic_del_tport_timer_sync(struct fnic *fnic, + struct fnic_tport_s *tport); +void fdls_send_fabric_logo(struct fnic_iport_s *iport); +int fnic_fdls_validate_and_get_frame_type(struct fnic_iport_s *iport, + struct fc_frame_header *fchdr); +void fdls_send_tport_abts(struct fnic_iport_s *iport, + struct fnic_tport_s *tport); +bool fdls_delete_tport(struct fnic_iport_s *iport, + struct fnic_tport_s *tport); +void fdls_fdmi_timer_callback(struct timer_list *t); + +/* fnic_fcs.c */ +void fnic_fdls_init(struct fnic *fnic, int usefip); +void fnic_send_fcoe_frame(struct fnic_iport_s *iport, void *frame, + int frame_size); +void fnic_fcoe_send_vlan_req(struct fnic *fnic); +int fnic_send_fip_frame(struct fnic_iport_s *iport, + void *frame, int frame_size); +void fnic_fdls_learn_fcoe_macs(struct fnic_iport_s *iport, void *rx_frame, + uint8_t *fcid); +void fnic_fdls_add_tport(struct fnic_iport_s *iport, + struct fnic_tport_s *tport, unsigned long flags); +void fnic_fdls_remove_tport(struct fnic_iport_s *iport, + struct fnic_tport_s *tport, + unsigned long flags); + +/* fip.c */ +void fnic_fcoe_send_vlan_req(struct fnic *fnic); +void fnic_common_fip_cleanup(struct fnic *fnic); +int fdls_fip_recv_frame(struct fnic *fnic, void *frame); +void fnic_handle_fcs_ka_timer(struct timer_list *t); +void fnic_handle_enode_ka_timer(struct timer_list *t); +void fnic_handle_vn_ka_timer(struct timer_list *t); +void fnic_handle_fip_timer(struct timer_list *t); +extern void fdls_fabric_timer_callback(struct timer_list *t); + +/* fnic_scsi.c */ +void fnic_scsi_fcpio_reset(struct fnic *fnic); +extern void fdls_fabric_timer_callback(struct timer_list *t); +void fnic_rport_exch_reset(struct fnic *fnic, u32 fcid); +int fnic_fdls_register_portid(struct fnic_iport_s *iport, u32 port_id, + void *fp); +struct fnic_tport_s *fnic_find_tport_by_fcid(struct fnic_iport_s *iport, + uint32_t fcid); +struct fnic_tport_s *fnic_find_tport_by_wwpn(struct fnic_iport_s *iport, + uint64_t wwpn); + +#endif /* _FNIC_FDLS_H_ */ diff --git a/drivers/scsi/fnic/fnic_fip.h b/drivers/scsi/fnic/fnic_fip.h deleted file mode 100644 index 79f53029737b..000000000000 --- a/drivers/scsi/fnic/fnic_fip.h +++ /dev/null @@ -1,48 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright 2008 Cisco Systems, Inc. All rights reserved. - * Copyright 2007 Nuova Systems, Inc. All rights reserved. - */ - -#ifndef _FNIC_FIP_H_ -#define _FNIC_FIP_H_ - - -#define FCOE_CTLR_START_DELAY 2000 /* ms after first adv. to choose FCF */ -#define FCOE_CTLR_FIPVLAN_TOV 2000 /* ms after FIP VLAN disc */ -#define FCOE_CTLR_MAX_SOL 8 - -#define FINC_MAX_FLOGI_REJECTS 8 - -struct vlan { - __be16 vid; - __be16 type; -}; - -/* - * VLAN entry. - */ -struct fcoe_vlan { - struct list_head list; - u16 vid; /* vlan ID */ - u16 sol_count; /* no. 
of sols sent */ - u16 state; /* state */ -}; - -enum fip_vlan_state { - FIP_VLAN_AVAIL = 0, /* don't do anything */ - FIP_VLAN_SENT = 1, /* sent */ - FIP_VLAN_USED = 2, /* succeed */ - FIP_VLAN_FAILED = 3, /* failed to response */ -}; - -struct fip_vlan { - struct ethhdr eth; - struct fip_header fip; - struct { - struct fip_mac_desc mac; - struct fip_wwn_desc wwnn; - } desc; -}; - -#endif /* __FINC_FIP_H_ */ diff --git a/drivers/scsi/fnic/fnic_io.h b/drivers/scsi/fnic/fnic_io.h index 5895ead20e14..0d974e040ab7 100644 --- a/drivers/scsi/fnic/fnic_io.h +++ b/drivers/scsi/fnic/fnic_io.h @@ -7,6 +7,7 @@ #define _FNIC_IO_H_ #include <scsi/fc/fc_fcp.h> +#include "fnic_fdls.h" #define FNIC_DFLT_SG_DESC_CNT 32 #define FNIC_MAX_SG_DESC_CNT 256 /* Maximum descriptors per sgl */ @@ -41,6 +42,8 @@ enum fnic_ioreq_state { }; struct fnic_io_req { + struct fnic_iport_s *iport; + struct fnic_tport_s *tport; struct host_sg_desc *sgl_list; /* sgl list */ void *sgl_list_alloc; /* sgl list address used for free */ dma_addr_t sense_buf_pa; /* dma address for sense buffer*/ @@ -55,15 +58,4 @@ struct fnic_io_req { unsigned int tag; struct scsi_cmnd *sc; /* midlayer's cmd pointer */ }; - -enum fnic_port_speeds { - DCEM_PORTSPEED_NONE = 0, - DCEM_PORTSPEED_1G = 1000, - DCEM_PORTSPEED_10G = 10000, - DCEM_PORTSPEED_20G = 20000, - DCEM_PORTSPEED_25G = 25000, - DCEM_PORTSPEED_40G = 40000, - DCEM_PORTSPEED_4x10G = 41000, - DCEM_PORTSPEED_100G = 100000, -}; #endif /* _FNIC_IO_H_ */ diff --git a/drivers/scsi/fnic/fnic_isr.c b/drivers/scsi/fnic/fnic_isr.c index ff85441c6cea..7ed50b11afa6 100644 --- a/drivers/scsi/fnic/fnic_isr.c +++ b/drivers/scsi/fnic/fnic_isr.c @@ -7,7 +7,7 @@ #include <linux/errno.h> #include <linux/pci.h> #include <linux/interrupt.h> -#include <scsi/libfc.h> +#include <scsi/scsi_transport_fc.h> #include <scsi/fc_frame.h> #include "vnic_dev.h" #include "vnic_intr.h" @@ -222,7 +222,7 @@ int fnic_request_intr(struct fnic *fnic) fnic->msix[i].devname, fnic->msix[i].devid); if (err) { - FNIC_ISR_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, + FNIC_ISR_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "request_irq failed with error: %d\n", err); fnic_free_intr(fnic); @@ -250,10 +250,10 @@ int fnic_set_intr_mode_msix(struct fnic *fnic) * We need n RQs, m WQs, o Copy WQs, n+m+o CQs, and n+m+o+1 INTRs * (last INTR is used for WQ/RQ errors and notification area) */ - FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, + FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num, "rq-array size: %d wq-array size: %d copy-wq array size: %d\n", n, m, o); - FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, + FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num, "rq_count: %d raw_wq_count: %d wq_copy_count: %d cq_count: %d\n", fnic->rq_count, fnic->raw_wq_count, fnic->wq_copy_count, fnic->cq_count); @@ -265,17 +265,17 @@ int fnic_set_intr_mode_msix(struct fnic *fnic) vec_count = pci_alloc_irq_vectors(fnic->pdev, min_irqs, vecs, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); - FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, + FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num, "allocated %d MSI-X vectors\n", vec_count); if (vec_count > 0) { if (vec_count < vecs) { - FNIC_ISR_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, + FNIC_ISR_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "interrupts number mismatch: vec_count: %d vecs: %d\n", vec_count, vecs); if (vec_count < min_irqs) { - FNIC_ISR_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, + FNIC_ISR_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "no interrupts for copy wq\n"); 
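The MSI-X sizing in fnic_set_intr_mode_msix() follows the accounting spelled out in its comment: n RQs, m raw WQs and o copy WQs each get a completion queue, plus one extra vector for WQ/RQ errors and the notification area. A minimal self-contained sketch of that arithmetic, with illustrative names that are not part of the driver:

/* Illustrative sketch of the fnic MSI-X vector accounting; not driver code. */
static int vectors_needed(int n_rq, int m_raw_wq, int o_copy_wq)
{
	/* one CQ per queue, plus one vector for errors and notifications */
	return n_rq + m_raw_wq + o_copy_wq + 1;
}

When pci_alloc_irq_vectors() hands back fewer vectors than this minimum, the path right below gives up with the "no interrupts for copy wq" message.
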
return 1; } @@ -287,7 +287,7 @@ int fnic_set_intr_mode_msix(struct fnic *fnic) fnic->wq_copy_count = vec_count - n - m - 1; fnic->wq_count = fnic->raw_wq_count + fnic->wq_copy_count; if (fnic->cq_count != vec_count - 1) { - FNIC_ISR_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, + FNIC_ISR_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "CQ count: %d does not match MSI-X vector count: %d\n", fnic->cq_count, vec_count); fnic->cq_count = vec_count - 1; @@ -295,23 +295,23 @@ int fnic_set_intr_mode_msix(struct fnic *fnic) fnic->intr_count = vec_count; fnic->err_intr_offset = fnic->rq_count + fnic->wq_count; - FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, + FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num, "rq_count: %d raw_wq_count: %d copy_wq_base: %d\n", fnic->rq_count, fnic->raw_wq_count, fnic->copy_wq_base); - FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, + FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num, "wq_copy_count: %d wq_count: %d cq_count: %d\n", fnic->wq_copy_count, fnic->wq_count, fnic->cq_count); - FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, + FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num, "intr_count: %d err_intr_offset: %u", fnic->intr_count, fnic->err_intr_offset); vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_MSIX); - FNIC_ISR_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, + FNIC_ISR_DBG(KERN_INFO, fnic->host, fnic->fnic_num, "fnic using MSI-X\n"); return 0; } @@ -351,7 +351,7 @@ int fnic_set_intr_mode(struct fnic *fnic) fnic->intr_count = 1; fnic->err_intr_offset = 0; - FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, + FNIC_ISR_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "Using MSI Interrupts\n"); vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_MSI); @@ -377,7 +377,7 @@ int fnic_set_intr_mode(struct fnic *fnic) fnic->cq_count = 3; fnic->intr_count = 3; - FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, + FNIC_ISR_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "Using Legacy Interrupts\n"); vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_INTX); diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c index 6772f3683a8c..2fc5e9688147 100644 --- a/drivers/scsi/fnic/fnic_main.c +++ b/drivers/scsi/fnic/fnic_main.c @@ -22,15 +22,15 @@ #include <scsi/scsi_transport.h> #include <scsi/scsi_transport_fc.h> #include <scsi/scsi_tcq.h> -#include <scsi/libfc.h> #include <scsi/fc_frame.h> #include "vnic_dev.h" #include "vnic_intr.h" #include "vnic_stats.h" #include "fnic_io.h" -#include "fnic_fip.h" #include "fnic.h" +#include "fnic_fdls.h" +#include "fdls_fc.h" #define PCI_DEVICE_ID_CISCO_FNIC 0x0045 @@ -39,10 +39,16 @@ static struct kmem_cache *fnic_sgl_cache[FNIC_SGL_NUM_CACHES]; static struct kmem_cache *fnic_io_req_cache; +static struct kmem_cache *fdls_frame_cache; +static struct kmem_cache *fdls_frame_elem_cache; static LIST_HEAD(fnic_list); static DEFINE_SPINLOCK(fnic_list_lock); static DEFINE_IDA(fnic_ida); +struct work_struct reset_fnic_work; +LIST_HEAD(reset_fnic_list); +DEFINE_SPINLOCK(reset_fnic_list_lock); + /* Supported devices by fnic module */ static const struct pci_device_id fnic_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_CISCO, PCI_DEVICE_ID_CISCO_FNIC) }, @@ -60,6 +66,14 @@ unsigned int fnic_log_level; module_param(fnic_log_level, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(fnic_log_level, "bit mask of fnic logging levels"); +unsigned int fnic_fdmi_support = 1; +module_param(fnic_fdmi_support, int, 0644); +MODULE_PARM_DESC(fnic_fdmi_support, "FDMI support"); + +static 
unsigned int fnic_tgt_id_binding = 1; +module_param(fnic_tgt_id_binding, uint, 0644); +MODULE_PARM_DESC(fnic_tgt_id_binding, + "Target ID binding (0 for none. 1 for binding by WWPN (default))"); unsigned int io_completions = FNIC_DFLT_IO_COMPLETIONS; module_param(io_completions, int, S_IRUGO|S_IWUSR); @@ -79,13 +93,13 @@ static unsigned int fnic_max_qdepth = FNIC_DFLT_QUEUE_DEPTH; module_param(fnic_max_qdepth, uint, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(fnic_max_qdepth, "Queue depth to report for each LUN"); -static struct libfc_function_template fnic_transport_template = { - .frame_send = fnic_send, - .lport_set_port_id = fnic_set_port_id, - .fcp_abort_io = fnic_empty_scsi_cleanup, - .fcp_cleanup = fnic_empty_scsi_cleanup, - .exch_mgr_reset = fnic_exch_mgr_reset -}; +unsigned int pc_rscn_handling_feature_flag = PC_RSCN_HANDLING_FEATURE_ON; +module_param(pc_rscn_handling_feature_flag, uint, 0644); +MODULE_PARM_DESC(pc_rscn_handling_feature_flag, + "PCRSCN handling (0 for none. 1 to handle PCRSCN (default))"); + +struct workqueue_struct *reset_fnic_work_queue; +struct workqueue_struct *fnic_fip_queue; static int fnic_sdev_init(struct scsi_device *sdev) { @@ -105,7 +119,7 @@ static const struct scsi_host_template fnic_host_template = { .eh_timed_out = fc_eh_timed_out, .eh_abort_handler = fnic_abort_cmd, .eh_device_reset_handler = fnic_device_reset, - .eh_host_reset_handler = fnic_host_reset, + .eh_host_reset_handler = fnic_eh_host_reset_handler, .sdev_init = fnic_sdev_init, .change_queue_depth = scsi_change_queue_depth, .this_id = -1, @@ -146,7 +160,7 @@ static struct fc_function_template fnic_fc_functions = { .get_host_speed = fnic_get_host_speed, .show_host_speed = 1, .show_host_port_type = 1, - .get_host_port_state = fc_get_host_port_state, + .get_host_port_state = fnic_get_host_port_state, .show_host_port_state = 1, .show_host_symbolic_name = 1, .show_rport_maxframe_size = 1, @@ -157,54 +171,88 @@ static struct fc_function_template fnic_fc_functions = { .show_starget_port_id = 1, .show_rport_dev_loss_tmo = 1, .set_rport_dev_loss_tmo = fnic_set_rport_dev_loss_tmo, - .issue_fc_host_lip = fnic_reset, + .issue_fc_host_lip = fnic_issue_fc_host_lip, .get_fc_host_stats = fnic_get_stats, .reset_fc_host_stats = fnic_reset_host_stats, - .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv), + .dd_fcrport_size = sizeof(struct rport_dd_data_s), .terminate_rport_io = fnic_terminate_rport_io, - .bsg_request = fc_lport_bsg_request, + .bsg_request = NULL, }; static void fnic_get_host_speed(struct Scsi_Host *shost) { - struct fc_lport *lp = shost_priv(shost); - struct fnic *fnic = lport_priv(lp); + struct fnic *fnic = *((struct fnic **) shost_priv(shost)); u32 port_speed = vnic_dev_port_speed(fnic->vdev); + struct fnic_stats *fnic_stats = &fnic->fnic_stats; + + FNIC_MAIN_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "port_speed: %d Mbps", port_speed); + atomic64_set(&fnic_stats->misc_stats.port_speed_in_mbps, port_speed); /* Add in other values as they get defined in fw */ switch (port_speed) { + case DCEM_PORTSPEED_1G: + fc_host_speed(shost) = FC_PORTSPEED_1GBIT; + break; + case DCEM_PORTSPEED_2G: + fc_host_speed(shost) = FC_PORTSPEED_2GBIT; + break; + case DCEM_PORTSPEED_4G: + fc_host_speed(shost) = FC_PORTSPEED_4GBIT; + break; + case DCEM_PORTSPEED_8G: + fc_host_speed(shost) = FC_PORTSPEED_8GBIT; + break; case DCEM_PORTSPEED_10G: fc_host_speed(shost) = FC_PORTSPEED_10GBIT; break; + case DCEM_PORTSPEED_16G: + fc_host_speed(shost) = FC_PORTSPEED_16GBIT; + break; case DCEM_PORTSPEED_20G: fc_host_speed(shost) 
= FC_PORTSPEED_20GBIT; break; case DCEM_PORTSPEED_25G: fc_host_speed(shost) = FC_PORTSPEED_25GBIT; break; + case DCEM_PORTSPEED_32G: + fc_host_speed(shost) = FC_PORTSPEED_32GBIT; + break; case DCEM_PORTSPEED_40G: case DCEM_PORTSPEED_4x10G: fc_host_speed(shost) = FC_PORTSPEED_40GBIT; break; + case DCEM_PORTSPEED_50G: + fc_host_speed(shost) = FC_PORTSPEED_50GBIT; + break; + case DCEM_PORTSPEED_64G: + fc_host_speed(shost) = FC_PORTSPEED_64GBIT; + break; case DCEM_PORTSPEED_100G: fc_host_speed(shost) = FC_PORTSPEED_100GBIT; break; + case DCEM_PORTSPEED_128G: + fc_host_speed(shost) = FC_PORTSPEED_128GBIT; + break; default: + FNIC_MAIN_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Unknown FC speed: %d Mbps", port_speed); fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN; break; } } +/* Placeholder function */ static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *host) { int ret; - struct fc_lport *lp = shost_priv(host); - struct fnic *fnic = lport_priv(lp); - struct fc_host_statistics *stats = &lp->host_stats; + struct fnic *fnic = *((struct fnic **) shost_priv(host)); + struct fc_host_statistics *stats = &fnic->fnic_stats.host_stats; struct vnic_stats *vs; unsigned long flags; - if (time_before(jiffies, fnic->stats_time + HZ / FNIC_STATS_RATE_LIMIT)) + if (time_before + (jiffies, fnic->stats_time + HZ / FNIC_STATS_RATE_LIMIT)) return stats; fnic->stats_time = jiffies; @@ -213,24 +261,22 @@ static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *host) spin_unlock_irqrestore(&fnic->fnic_lock, flags); if (ret) { - FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, - "fnic: Get vnic stats failed" - " 0x%x", ret); + FNIC_MAIN_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "fnic: Get vnic stats failed: 0x%x", ret); return stats; } vs = fnic->stats; stats->tx_frames = vs->tx.tx_unicast_frames_ok; - stats->tx_words = vs->tx.tx_unicast_bytes_ok / 4; + stats->tx_words = vs->tx.tx_unicast_bytes_ok / 4; stats->rx_frames = vs->rx.rx_unicast_frames_ok; - stats->rx_words = vs->rx.rx_unicast_bytes_ok / 4; + stats->rx_words = vs->rx.rx_unicast_bytes_ok / 4; stats->error_frames = vs->tx.tx_errors + vs->rx.rx_errors; stats->dumped_frames = vs->tx.tx_drops + vs->rx.rx_drop; stats->invalid_crc_count = vs->rx.rx_crc_errors; stats->seconds_since_last_reset = - (jiffies - fnic->stats_reset_time) / HZ; + (jiffies - fnic->stats_reset_time) / HZ; stats->fcp_input_megabytes = div_u64(fnic->fcp_input_bytes, 1000000); stats->fcp_output_megabytes = div_u64(fnic->fcp_output_bytes, 1000000); - return stats; } @@ -311,8 +357,7 @@ void fnic_dump_fchost_stats(struct Scsi_Host *host, static void fnic_reset_host_stats(struct Scsi_Host *host) { int ret; - struct fc_lport *lp = shost_priv(host); - struct fnic *fnic = lport_priv(lp); + struct fnic *fnic = *((struct fnic **) shost_priv(host)); struct fc_host_statistics *stats; unsigned long flags; @@ -325,7 +370,7 @@ static void fnic_reset_host_stats(struct Scsi_Host *host) spin_unlock_irqrestore(&fnic->fnic_lock, flags); if (ret) { - FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, + FNIC_MAIN_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "fnic: Reset vnic stats failed" " 0x%x", ret); return; @@ -344,25 +389,19 @@ void fnic_log_q_error(struct fnic *fnic) for (i = 0; i < fnic->raw_wq_count; i++) { error_status = ioread32(&fnic->wq[i].ctrl->error_status); if (error_status) - shost_printk(KERN_ERR, fnic->lport->host, - "WQ[%d] error_status" - " %d\n", i, error_status); + dev_err(&fnic->pdev->dev, "WQ[%d] error_status %d\n", i, error_status); } for (i 
= 0; i < fnic->rq_count; i++) { error_status = ioread32(&fnic->rq[i].ctrl->error_status); if (error_status) - shost_printk(KERN_ERR, fnic->lport->host, - "RQ[%d] error_status" - " %d\n", i, error_status); + dev_err(&fnic->pdev->dev, "RQ[%d] error_status %d\n", i, error_status); } for (i = 0; i < fnic->wq_copy_count; i++) { error_status = ioread32(&fnic->hw_copy_wq[i].ctrl->error_status); if (error_status) - shost_printk(KERN_ERR, fnic->lport->host, - "CWQ[%d] error_status" - " %d\n", i, error_status); + dev_err(&fnic->pdev->dev, "CWQ[%d] error_status %d\n", i, error_status); } } @@ -396,8 +435,7 @@ static int fnic_notify_set(struct fnic *fnic) err = vnic_dev_notify_set(fnic->vdev, fnic->wq_copy_count + fnic->copy_wq_base); break; default: - shost_printk(KERN_ERR, fnic->lport->host, - "Interrupt mode should be set up" + dev_err(&fnic->pdev->dev, "Interrupt mode should be set up" " before devcmd notify set %d\n", vnic_dev_get_intr_mode(fnic->vdev)); err = -1; @@ -416,13 +454,6 @@ static void fnic_notify_timer(struct timer_list *t) round_jiffies(jiffies + FNIC_NOTIFY_TIMER_PERIOD)); } -static void fnic_fip_notify_timer(struct timer_list *t) -{ - struct fnic *fnic = from_timer(fnic, t, fip_timer); - - fnic_handle_fip_timer(fnic); -} - static void fnic_notify_timer_start(struct fnic *fnic) { switch (vnic_dev_get_intr_mode(fnic->vdev)) { @@ -522,6 +553,8 @@ static int fnic_cleanup(struct fnic *fnic) vnic_intr_clean(&fnic->intr[i]); mempool_destroy(fnic->io_req_pool); + mempool_destroy(fnic->frame_pool); + mempool_destroy(fnic->frame_elem_pool); for (i = 0; i < FNIC_SGL_NUM_CACHES; i++) mempool_destroy(fnic->io_sgl_pool[i]); @@ -534,25 +567,36 @@ static void fnic_iounmap(struct fnic *fnic) iounmap(fnic->bar0.vaddr); } -/** - * fnic_get_mac() - get assigned data MAC address for FIP code. - * @lport: local port. 
- */ -static u8 *fnic_get_mac(struct fc_lport *lport) +static void fnic_set_vlan(struct fnic *fnic, u16 vlan_id) { - struct fnic *fnic = lport_priv(lport); + vnic_dev_set_default_vlan(fnic->vdev, vlan_id); +} - return fnic->data_src_addr; +static void fnic_scsi_init(struct fnic *fnic) +{ + struct Scsi_Host *host = fnic->host; + + snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME, + host->host_no); + + host->transportt = fnic_fc_transport; } -static void fnic_set_vlan(struct fnic *fnic, u16 vlan_id) +static void fnic_free_ioreq_tables_mq(struct fnic *fnic) { - vnic_dev_set_default_vlan(fnic->vdev, vlan_id); + int hwq; + + for (hwq = 0; hwq < fnic->wq_copy_count; hwq++) + kfree(fnic->sw_copy_wq[hwq].io_req_table); } static int fnic_scsi_drv_init(struct fnic *fnic) { - struct Scsi_Host *host = fnic->lport->host; + struct Scsi_Host *host = fnic->host; + int err; + struct pci_dev *pdev = fnic->pdev; + struct fnic_iport_s *iport = &fnic->iport; + int hwq; /* Configure maximum outstanding IO reqs*/ if (fnic->config.io_throttle_count != FNIC_UCSM_DFLT_THROTTLE_CNT_BLD) @@ -563,40 +607,92 @@ static int fnic_scsi_drv_init(struct fnic *fnic) fnic->fnic_max_tag_id = host->can_queue; host->max_lun = fnic->config.luns_per_tgt; host->max_id = FNIC_MAX_FCP_TARGET; - host->max_cmd_len = FCOE_MAX_CMD_LEN; + host->max_cmd_len = FNIC_FCOE_MAX_CMD_LEN; host->nr_hw_queues = fnic->wq_copy_count; - shost_printk(KERN_INFO, host, - "fnic: can_queue: %d max_lun: %llu", + dev_info(&fnic->pdev->dev, "fnic: can_queue: %d max_lun: %llu", host->can_queue, host->max_lun); - shost_printk(KERN_INFO, host, - "fnic: max_id: %d max_cmd_len: %d nr_hw_queues: %d", + dev_info(&fnic->pdev->dev, "fnic: max_id: %d max_cmd_len: %d nr_hw_queues: %d", host->max_id, host->max_cmd_len, host->nr_hw_queues); + for (hwq = 0; hwq < fnic->wq_copy_count; hwq++) { + fnic->sw_copy_wq[hwq].ioreq_table_size = fnic->fnic_max_tag_id; + fnic->sw_copy_wq[hwq].io_req_table = + kzalloc((fnic->sw_copy_wq[hwq].ioreq_table_size + 1) * + sizeof(struct fnic_io_req *), GFP_KERNEL); + + if (!fnic->sw_copy_wq[hwq].io_req_table) { + fnic_free_ioreq_tables_mq(fnic); + return -ENOMEM; + } + } + + dev_info(&fnic->pdev->dev, "fnic copy wqs: %d, Q0 ioreq table size: %d\n", + fnic->wq_copy_count, fnic->sw_copy_wq[0].ioreq_table_size); + + fnic_scsi_init(fnic); + + err = scsi_add_host(fnic->host, &pdev->dev); + if (err) { + dev_err(&fnic->pdev->dev, "fnic: scsi add host failed: aborting\n"); + return err; + } + fc_host_maxframe_size(fnic->host) = iport->max_payload_size; + fc_host_dev_loss_tmo(fnic->host) = + fnic->config.port_down_timeout / 1000; + sprintf(fc_host_symbolic_name(fnic->host), + DRV_NAME " v" DRV_VERSION " over %s", fnic->name); + fc_host_port_type(fnic->host) = FC_PORTTYPE_NPORT; + fc_host_node_name(fnic->host) = iport->wwnn; + fc_host_port_name(fnic->host) = iport->wwpn; + fc_host_supported_classes(fnic->host) = FC_COS_CLASS3; + memset(fc_host_supported_fc4s(fnic->host), 0, + sizeof(fc_host_supported_fc4s(fnic->host))); + fc_host_supported_fc4s(fnic->host)[2] = 1; + fc_host_supported_fc4s(fnic->host)[7] = 1; + fc_host_supported_speeds(fnic->host) = 0; + fc_host_supported_speeds(fnic->host) |= FC_PORTSPEED_8GBIT; + + dev_info(&fnic->pdev->dev, "shost_data: 0x%p\n", fnic->host->shost_data); + if (fnic->host->shost_data != NULL) { + if (fnic_tgt_id_binding == 0) { + dev_info(&fnic->pdev->dev, "Setting target binding to NONE\n"); + fc_host_tgtid_bind_type(fnic->host) = FC_TGTID_BIND_NONE; + } else { + dev_info(&fnic->pdev->dev, "Setting target 
binding to WWPN\n"); + fc_host_tgtid_bind_type(fnic->host) = FC_TGTID_BIND_BY_WWPN; + } + } + + fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache); + if (!fnic->io_req_pool) { + scsi_remove_host(fnic->host); + return -ENOMEM; + } + return 0; } void fnic_mq_map_queues_cpus(struct Scsi_Host *host) { - struct fc_lport *lp = shost_priv(host); - struct fnic *fnic = lport_priv(lp); + struct fnic *fnic = *((struct fnic **) shost_priv(host)); struct pci_dev *l_pdev = fnic->pdev; int intr_mode = fnic->config.intr_mode; struct blk_mq_queue_map *qmap = &host->tag_set.map[HCTX_TYPE_DEFAULT]; if (intr_mode == VNIC_DEV_INTR_MODE_MSI || intr_mode == VNIC_DEV_INTR_MODE_INTX) { - FNIC_MAIN_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, + FNIC_MAIN_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "intr_mode is not msix\n"); return; } - FNIC_MAIN_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, + FNIC_MAIN_DBG(KERN_INFO, fnic->host, fnic->fnic_num, "qmap->nr_queues: %d\n", qmap->nr_queues); if (l_pdev == NULL) { - FNIC_MAIN_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, + FNIC_MAIN_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "l_pdev is null\n"); return; } @@ -606,60 +702,65 @@ void fnic_mq_map_queues_cpus(struct Scsi_Host *host) static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { - struct Scsi_Host *host; - struct fc_lport *lp; + struct Scsi_Host *host = NULL; struct fnic *fnic; mempool_t *pool; + struct fnic_iport_s *iport; int err = 0; int fnic_id = 0; int i; unsigned long flags; - int hwq; + char *desc, *subsys_desc; + int len; /* - * Allocate SCSI Host and set up association between host, - * local port, and fnic + * Allocate fnic */ - lp = libfc_host_alloc(&fnic_host_template, sizeof(struct fnic)); - if (!lp) { - printk(KERN_ERR PFX "Unable to alloc libfc local port\n"); + fnic = kzalloc(sizeof(struct fnic), GFP_KERNEL); + if (!fnic) { err = -ENOMEM; - goto err_out; + goto err_out_fnic_alloc; } - host = lp->host; - fnic = lport_priv(lp); + iport = &fnic->iport; fnic_id = ida_alloc(&fnic_ida, GFP_KERNEL); if (fnic_id < 0) { - pr_err("Unable to alloc fnic ID\n"); + dev_err(&pdev->dev, "Unable to alloc fnic ID\n"); err = fnic_id; goto err_out_ida_alloc; } - fnic->lport = lp; - fnic->ctlr.lp = lp; - fnic->link_events = 0; - fnic->pdev = pdev; - - snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME, - host->host_no); - host->transportt = fnic_fc_transport; + fnic->pdev = pdev; fnic->fnic_num = fnic_id; - fnic_stats_debugfs_init(fnic); + + /* Find model name from PCIe subsys ID */ + if (fnic_get_desc_by_devid(pdev, &desc, &subsys_desc) == 0) { + dev_info(&fnic->pdev->dev, "Model: %s\n", subsys_desc); + + /* Update FDMI model */ + fnic->subsys_desc_len = strlen(subsys_desc); + len = ARRAY_SIZE(fnic->subsys_desc); + if (fnic->subsys_desc_len > len) + fnic->subsys_desc_len = len; + memcpy(fnic->subsys_desc, subsys_desc, fnic->subsys_desc_len); + dev_info(&fnic->pdev->dev, "FDMI Model: %s\n", fnic->subsys_desc); + } else { + fnic->subsys_desc_len = 0; + dev_info(&fnic->pdev->dev, "Model: %s subsys_id: 0x%04x\n", "Unknown", + pdev->subsystem_device); + } err = pci_enable_device(pdev); if (err) { - shost_printk(KERN_ERR, fnic->lport->host, - "Cannot enable PCI device, aborting.\n"); - goto err_out_free_hba; + dev_err(&fnic->pdev->dev, "Cannot enable PCI device, aborting.\n"); + goto err_out_pci_enable_device; } err = pci_request_regions(pdev, DRV_NAME); if (err) { - shost_printk(KERN_ERR, fnic->lport->host, - "Cannot enable PCI resources, aborting\n"); - goto 
err_out_disable_device; + dev_err(&fnic->pdev->dev, "Cannot enable PCI resources, aborting\n"); + goto err_out_pci_request_regions; } pci_set_master(pdev); @@ -672,19 +773,17 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) { err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (err) { - shost_printk(KERN_ERR, fnic->lport->host, - "No usable DMA configuration " + dev_err(&fnic->pdev->dev, "No usable DMA configuration " "aborting\n"); - goto err_out_release_regions; + goto err_out_set_dma_mask; } } /* Map vNIC resources from BAR0 */ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { - shost_printk(KERN_ERR, fnic->lport->host, - "BAR0 not memory-map'able, aborting.\n"); + dev_err(&fnic->pdev->dev, "BAR0 not memory-map'able, aborting.\n"); err = -ENODEV; - goto err_out_release_regions; + goto err_out_map_bar; } fnic->bar0.vaddr = pci_iomap(pdev, 0, 0); @@ -692,61 +791,79 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) fnic->bar0.len = pci_resource_len(pdev, 0); if (!fnic->bar0.vaddr) { - shost_printk(KERN_ERR, fnic->lport->host, - "Cannot memory-map BAR0 res hdr, " + dev_err(&fnic->pdev->dev, "Cannot memory-map BAR0 res hdr, " "aborting.\n"); err = -ENODEV; - goto err_out_release_regions; + goto err_out_fnic_map_bar; } fnic->vdev = vnic_dev_register(NULL, fnic, pdev, &fnic->bar0); if (!fnic->vdev) { - shost_printk(KERN_ERR, fnic->lport->host, - "vNIC registration failed, " + dev_err(&fnic->pdev->dev, "vNIC registration failed, " "aborting.\n"); err = -ENODEV; - goto err_out_iounmap; + goto err_out_dev_register; } err = vnic_dev_cmd_init(fnic->vdev); if (err) { - shost_printk(KERN_ERR, fnic->lport->host, - "vnic_dev_cmd_init() returns %d, aborting\n", + dev_err(&fnic->pdev->dev, "vnic_dev_cmd_init() returns %d, aborting\n", err); - goto err_out_vnic_unregister; + goto err_out_dev_cmd_init; } err = fnic_dev_wait(fnic->vdev, vnic_dev_open, vnic_dev_open_done, CMD_OPENF_RQ_ENABLE_THEN_POST); if (err) { - shost_printk(KERN_ERR, fnic->lport->host, - "vNIC dev open failed, aborting.\n"); - goto err_out_dev_cmd_deinit; + dev_err(&fnic->pdev->dev, "vNIC dev open failed, aborting.\n"); + goto err_out_dev_open; } err = vnic_dev_init(fnic->vdev, 0); if (err) { - shost_printk(KERN_ERR, fnic->lport->host, - "vNIC dev init failed, aborting.\n"); - goto err_out_dev_close; + dev_err(&fnic->pdev->dev, "vNIC dev init failed, aborting.\n"); + goto err_out_dev_init; } - err = vnic_dev_mac_addr(fnic->vdev, fnic->ctlr.ctl_src_addr); + err = vnic_dev_mac_addr(fnic->vdev, iport->hwmac); if (err) { - shost_printk(KERN_ERR, fnic->lport->host, - "vNIC get MAC addr failed \n"); - goto err_out_dev_close; + dev_err(&fnic->pdev->dev, "vNIC get MAC addr failed\n"); + goto err_out_dev_mac_addr; } /* set data_src for point-to-point mode and to keep it non-zero */ - memcpy(fnic->data_src_addr, fnic->ctlr.ctl_src_addr, ETH_ALEN); + memcpy(fnic->data_src_addr, iport->hwmac, ETH_ALEN); /* Get vNIC configuration */ err = fnic_get_vnic_config(fnic); if (err) { - shost_printk(KERN_ERR, fnic->lport->host, - "Get vNIC configuration failed, " + dev_err(&fnic->pdev->dev, "Get vNIC configuration failed, " "aborting.\n"); - goto err_out_dev_close; + goto err_out_fnic_get_config; + } + + switch (fnic->config.flags & 0xff0) { + case VFCF_FC_INITIATOR: + { + host = + scsi_host_alloc(&fnic_host_template, + sizeof(struct fnic *)); + if (!host) { + dev_err(&fnic->pdev->dev, "Unable to allocate scsi host\n"); + err = -ENOMEM; + goto err_out_scsi_host_alloc; + 
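The initiator case above sizes the Scsi_Host private area to hold nothing but a back-pointer (sizeof(struct fnic *)); the fnic is stored there immediately afterwards, and every transport callback in this patch recovers it the same way. A minimal sketch of the pattern, with an illustrative helper name that is not in the patch:

#include <scsi/scsi_host.h>

struct fnic; /* opaque here; defined in fnic.h */

/* Illustrative helper: recover the fnic stashed in the host private area. */
static inline struct fnic *fnic_from_shost(struct Scsi_Host *shost)
{
	/* mirrors the open-coded *((struct fnic **) shost_priv(...)) usage */
	return *((struct fnic **) shost_priv(shost));
}
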
} + *((struct fnic **) shost_priv(host)) = fnic; + + fnic->host = host; + fnic->role = FNIC_ROLE_FCP_INITIATOR; + dev_info(&fnic->pdev->dev, "fnic: %d is scsi initiator\n", + fnic->fnic_num); + } + break; + default: + dev_info(&fnic->pdev->dev, "fnic: %d has no role defined\n", fnic->fnic_num); + err = -EINVAL; + goto err_out_fnic_role; } /* Setup PCI resources */ @@ -756,29 +873,18 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = fnic_set_intr_mode(fnic); if (err) { - shost_printk(KERN_ERR, fnic->lport->host, - "Failed to set intr mode, " + dev_err(&fnic->pdev->dev, "Failed to set intr mode, " "aborting.\n"); - goto err_out_dev_close; + goto err_out_fnic_set_intr_mode; } err = fnic_alloc_vnic_resources(fnic); if (err) { - shost_printk(KERN_ERR, fnic->lport->host, - "Failed to alloc vNIC resources, " + dev_err(&fnic->pdev->dev, "Failed to alloc vNIC resources, " "aborting.\n"); - goto err_out_clear_intr; + goto err_out_fnic_alloc_vnic_res; } - - fnic_scsi_drv_init(fnic); - - for (hwq = 0; hwq < fnic->wq_copy_count; hwq++) { - fnic->sw_copy_wq[hwq].ioreq_table_size = fnic->fnic_max_tag_id; - fnic->sw_copy_wq[hwq].io_req_table = - kzalloc((fnic->sw_copy_wq[hwq].ioreq_table_size + 1) * - sizeof(struct fnic_io_req *), GFP_KERNEL); - } - shost_printk(KERN_INFO, fnic->lport->host, "fnic copy wqs: %d, Q0 ioreq table size: %d\n", + dev_info(&fnic->pdev->dev, "fnic copy wqs: %d, Q0 ioreq table size: %d\n", fnic->wq_copy_count, fnic->sw_copy_wq[0].ioreq_table_size); /* initialize all fnic locks */ @@ -794,50 +900,56 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) fnic->fw_ack_index[i] = -1; } - err = -ENOMEM; - fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache); - if (!fnic->io_req_pool) - goto err_out_free_resources; - pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]); - if (!pool) - goto err_out_free_ioreq_pool; + if (!pool) { + err = -ENOMEM; + goto err_out_free_resources; + } fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT] = pool; pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_MAX]); - if (!pool) + if (!pool) { + err = -ENOMEM; goto err_out_free_dflt_pool; + } fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX] = pool; + pool = mempool_create_slab_pool(FDLS_MIN_FRAMES, fdls_frame_cache); + if (!pool) { + err = -ENOMEM; + goto err_out_fdls_frame_pool; + } + fnic->frame_pool = pool; + + pool = mempool_create_slab_pool(FDLS_MIN_FRAME_ELEM, + fdls_frame_elem_cache); + if (!pool) { + err = -ENOMEM; + goto err_out_fdls_frame_elem_pool; + } + fnic->frame_elem_pool = pool; + /* setup vlan config, hw inserts vlan header */ fnic->vlan_hw_insert = 1; fnic->vlan_id = 0; - /* Initialize the FIP fcoe_ctrl struct */ - fnic->ctlr.send = fnic_eth_send; - fnic->ctlr.update_mac = fnic_update_mac; - fnic->ctlr.get_src_addr = fnic_get_mac; if (fnic->config.flags & VFCF_FIP_CAPABLE) { - shost_printk(KERN_INFO, fnic->lport->host, - "firmware supports FIP\n"); + dev_info(&fnic->pdev->dev, "firmware supports FIP\n"); /* enable directed and multicast */ vnic_dev_packet_filter(fnic->vdev, 1, 1, 0, 0, 0); vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS); - vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr); - fnic->set_vlan = fnic_set_vlan; - fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_AUTO); - timer_setup(&fnic->fip_timer, fnic_fip_notify_timer, 0); + vnic_dev_add_addr(fnic->vdev, iport->hwmac); spin_lock_init(&fnic->vlans_lock); INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame); - INIT_WORK(&fnic->event_work, 
fnic_handle_event); - skb_queue_head_init(&fnic->fip_frame_queue); - INIT_LIST_HEAD(&fnic->evlist); - INIT_LIST_HEAD(&fnic->vlans); + INIT_LIST_HEAD(&fnic->fip_frame_queue); + INIT_LIST_HEAD(&fnic->vlan_list); + timer_setup(&fnic->retry_fip_timer, fnic_handle_fip_timer, 0); + timer_setup(&fnic->fcs_ka_timer, fnic_handle_fcs_ka_timer, 0); + timer_setup(&fnic->enode_ka_timer, fnic_handle_enode_ka_timer, 0); + timer_setup(&fnic->vn_ka_timer, fnic_handle_vn_ka_timer, 0); + fnic->set_vlan = fnic_set_vlan; } else { - shost_printk(KERN_INFO, fnic->lport->host, - "firmware uses non-FIP mode\n"); - fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_NON_FIP); - fnic->ctlr.state = FIP_ST_NON_FIP; + dev_info(&fnic->pdev->dev, "firmware uses non-FIP mode\n"); } fnic->state = FNIC_IN_FC_MODE; @@ -850,9 +962,8 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* Setup notification buffer area */ err = fnic_notify_set(fnic); if (err) { - shost_printk(KERN_ERR, fnic->lport->host, - "Failed to alloc notify buffer, aborting.\n"); - goto err_out_free_max_pool; + dev_err(&fnic->pdev->dev, "Failed to alloc notify buffer, aborting.\n"); + goto err_out_fnic_notify_set; } /* Setup notify timer when using MSI interrupts */ @@ -863,13 +974,62 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) for (i = 0; i < fnic->rq_count; i++) { err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame); if (err) { - shost_printk(KERN_ERR, fnic->lport->host, - "fnic_alloc_rq_frame can't alloc " + dev_err(&fnic->pdev->dev, "fnic_alloc_rq_frame can't alloc " "frame\n"); - goto err_out_rq_buf; + goto err_out_alloc_rq_buf; } } + init_completion(&fnic->reset_completion_wait); + + /* Start local port initialization */ + iport->max_flogi_retries = fnic->config.flogi_retries; + iport->max_plogi_retries = fnic->config.plogi_retries; + iport->plogi_timeout = fnic->config.plogi_timeout; + iport->service_params = + (FNIC_FCP_SP_INITIATOR | FNIC_FCP_SP_RD_XRDY_DIS | + FNIC_FCP_SP_CONF_CMPL); + if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) + iport->service_params |= FNIC_FCP_SP_RETRY; + + iport->boot_time = jiffies; + iport->e_d_tov = fnic->config.ed_tov; + iport->r_a_tov = fnic->config.ra_tov; + iport->link_supported_speeds = FNIC_PORTSPEED_10GBIT; + iport->wwpn = fnic->config.port_wwn; + iport->wwnn = fnic->config.node_wwn; + + iport->max_payload_size = fnic->config.maxdatafieldsize; + + if ((iport->max_payload_size < FNIC_MIN_DATA_FIELD_SIZE) || + (iport->max_payload_size > FNIC_FC_MAX_PAYLOAD_LEN) || + ((iport->max_payload_size % 4) != 0)) { + iport->max_payload_size = FNIC_FC_MAX_PAYLOAD_LEN; + } + + iport->flags |= FNIC_FIRST_LINK_UP; + + timer_setup(&(iport->fabric.retry_timer), fdls_fabric_timer_callback, + 0); + + fnic->stats_reset_time = jiffies; + + INIT_WORK(&fnic->link_work, fnic_handle_link); + INIT_WORK(&fnic->frame_work, fnic_handle_frame); + INIT_WORK(&fnic->tport_work, fnic_tport_event_handler); + INIT_WORK(&fnic->flush_work, fnic_flush_tx); + + INIT_LIST_HEAD(&fnic->frame_queue); + INIT_LIST_HEAD(&fnic->tx_queue); + INIT_LIST_HEAD(&fnic->tport_event_list); + + INIT_DELAYED_WORK(&iport->oxid_pool.schedule_oxid_free_retry, + fdls_schedule_oxid_free_retry_work); + + /* Initialize the oxid reclaim list and work struct */ + INIT_LIST_HEAD(&iport->oxid_pool.oxid_reclaim_list); + INIT_DELAYED_WORK(&iport->oxid_pool.oxid_reclaim_work, fdls_reclaim_oxid_handler); + /* Enable all queues */ for (i = 0; i < fnic->raw_wq_count; i++) vnic_wq_enable(&fnic->wq[i]); @@ -880,180 +1040,131 @@ static int 
fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) for (i = 0; i < fnic->wq_copy_count; i++) vnic_wq_copy_enable(&fnic->hw_copy_wq[i]); - err = fnic_request_intr(fnic); - if (err) { - shost_printk(KERN_ERR, fnic->lport->host, - "Unable to request irq.\n"); - goto err_out_request_intr; - } + vnic_dev_enable(fnic->vdev); - /* - * Initialization done with PCI system, hardware, firmware. - * Add host to SCSI - */ - err = scsi_add_host(lp->host, &pdev->dev); + err = fnic_request_intr(fnic); if (err) { - shost_printk(KERN_ERR, fnic->lport->host, - "fnic: scsi_add_host failed...exiting\n"); - goto err_out_scsi_add_host; + dev_err(&fnic->pdev->dev, "Unable to request irq.\n"); + goto err_out_fnic_request_intr; } + fnic_notify_timer_start(fnic); - /* Start local port initiatialization */ - - lp->link_up = 0; - - lp->max_retry_count = fnic->config.flogi_retries; - lp->max_rport_retry_count = fnic->config.plogi_retries; - lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS | - FCP_SPPF_CONF_COMPL); - if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) - lp->service_params |= FCP_SPPF_RETRY; - - lp->boot_time = jiffies; - lp->e_d_tov = fnic->config.ed_tov; - lp->r_a_tov = fnic->config.ra_tov; - lp->link_supported_speeds = FC_PORTSPEED_10GBIT; - fc_set_wwnn(lp, fnic->config.node_wwn); - fc_set_wwpn(lp, fnic->config.port_wwn); - - fcoe_libfc_config(lp, &fnic->ctlr, &fnic_transport_template, 0); - - if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, FCPIO_HOST_EXCH_RANGE_START, - FCPIO_HOST_EXCH_RANGE_END, NULL)) { - err = -ENOMEM; - goto err_out_fc_exch_mgr_alloc; - } - - fc_lport_init_stats(lp); - fnic->stats_reset_time = jiffies; + fnic_fdls_init(fnic, (fnic->config.flags & VFCF_FIP_CAPABLE)); - fc_lport_config(lp); + err = fnic_scsi_drv_init(fnic); + if (err) + goto err_out_scsi_drv_init; - if (fc_set_mfs(lp, fnic->config.maxdatafieldsize + - sizeof(struct fc_frame_header))) { - err = -EINVAL; - goto err_out_free_exch_mgr; + err = fnic_stats_debugfs_init(fnic); + if (err) { + dev_err(&fnic->pdev->dev, "Failed to initialize debugfs for stats\n"); + goto err_out_free_stats_debugfs; } - fc_host_maxframe_size(lp->host) = lp->mfs; - fc_host_dev_loss_tmo(lp->host) = fnic->config.port_down_timeout / 1000; - sprintf(fc_host_symbolic_name(lp->host), - DRV_NAME " v" DRV_VERSION " over %s", fnic->name); + for (i = 0; i < fnic->intr_count; i++) + vnic_intr_unmask(&fnic->intr[i]); spin_lock_irqsave(&fnic_list_lock, flags); list_add_tail(&fnic->list, &fnic_list); spin_unlock_irqrestore(&fnic_list_lock, flags); - INIT_WORK(&fnic->link_work, fnic_handle_link); - INIT_WORK(&fnic->frame_work, fnic_handle_frame); - INIT_WORK(&fnic->flush_work, fnic_flush_tx); - skb_queue_head_init(&fnic->frame_queue); - skb_queue_head_init(&fnic->tx_queue); - - fc_fabric_login(lp); - - vnic_dev_enable(fnic->vdev); - - for (i = 0; i < fnic->intr_count; i++) - vnic_intr_unmask(&fnic->intr[i]); - - fnic_notify_timer_start(fnic); - return 0; -err_out_free_exch_mgr: - fc_exch_mgr_free(lp); -err_out_fc_exch_mgr_alloc: - fc_remove_host(lp->host); - scsi_remove_host(lp->host); -err_out_scsi_add_host: +err_out_free_stats_debugfs: + fnic_stats_debugfs_remove(fnic); + fnic_free_ioreq_tables_mq(fnic); + scsi_remove_host(fnic->host); +err_out_scsi_drv_init: fnic_free_intr(fnic); -err_out_request_intr: - for (i = 0; i < fnic->rq_count; i++) +err_out_fnic_request_intr: +err_out_alloc_rq_buf: + for (i = 0; i < fnic->rq_count; i++) { + if (ioread32(&fnic->rq[i].ctrl->enable)) + vnic_rq_disable(&fnic->rq[i]); vnic_rq_clean(&fnic->rq[i], 
fnic_free_rq_buf); -err_out_rq_buf: + } vnic_dev_notify_unset(fnic->vdev); -err_out_free_max_pool: +err_out_fnic_notify_set: + mempool_destroy(fnic->frame_elem_pool); +err_out_fdls_frame_elem_pool: + mempool_destroy(fnic->frame_pool); +err_out_fdls_frame_pool: mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX]); err_out_free_dflt_pool: mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT]); -err_out_free_ioreq_pool: - mempool_destroy(fnic->io_req_pool); err_out_free_resources: - for (hwq = 0; hwq < fnic->wq_copy_count; hwq++) - kfree(fnic->sw_copy_wq[hwq].io_req_table); fnic_free_vnic_resources(fnic); -err_out_clear_intr: +err_out_fnic_alloc_vnic_res: fnic_clear_intr_mode(fnic); -err_out_dev_close: +err_out_fnic_set_intr_mode: + scsi_host_put(fnic->host); +err_out_fnic_role: +err_out_scsi_host_alloc: +err_out_fnic_get_config: +err_out_dev_mac_addr: +err_out_dev_init: vnic_dev_close(fnic->vdev); -err_out_dev_cmd_deinit: -err_out_vnic_unregister: +err_out_dev_open: +err_out_dev_cmd_init: vnic_dev_unregister(fnic->vdev); -err_out_iounmap: +err_out_dev_register: fnic_iounmap(fnic); -err_out_release_regions: +err_out_fnic_map_bar: +err_out_map_bar: +err_out_set_dma_mask: pci_release_regions(pdev); -err_out_disable_device: +err_out_pci_request_regions: pci_disable_device(pdev); -err_out_free_hba: - fnic_stats_debugfs_remove(fnic); +err_out_pci_enable_device: ida_free(&fnic_ida, fnic->fnic_num); err_out_ida_alloc: - scsi_host_put(lp->host); -err_out: + kfree(fnic); +err_out_fnic_alloc: return err; } static void fnic_remove(struct pci_dev *pdev) { struct fnic *fnic = pci_get_drvdata(pdev); - struct fc_lport *lp = fnic->lport; unsigned long flags; - int hwq; /* - * Mark state so that the workqueue thread stops forwarding - * received frames and link events to the local port. ISR and - * other threads that can queue work items will also stop - * creating work items on the fnic workqueue + * Sometimes when probe() fails and does not exit with an error code, + * remove() gets called with 'drvdata' not set. Avoid a crash by + * adding a defensive check. */ + if (!fnic) + return; + spin_lock_irqsave(&fnic->fnic_lock, flags); fnic->stop_rx_link_events = 1; spin_unlock_irqrestore(&fnic->fnic_lock, flags); - if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI) - del_timer_sync(&fnic->notify_timer); /* * Flush the fnic event queue. After this call, there should * be no event queued for this fnic device in the workqueue */ flush_workqueue(fnic_event_queue); - skb_queue_purge(&fnic->frame_queue); - skb_queue_purge(&fnic->tx_queue); + + fnic_scsi_unload(fnic); + + if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI) + del_timer_sync(&fnic->notify_timer); if (fnic->config.flags & VFCF_FIP_CAPABLE) { - del_timer_sync(&fnic->fip_timer); - skb_queue_purge(&fnic->fip_frame_queue); + del_timer_sync(&fnic->retry_fip_timer); + del_timer_sync(&fnic->fcs_ka_timer); + del_timer_sync(&fnic->enode_ka_timer); + del_timer_sync(&fnic->vn_ka_timer); + + fnic_free_txq(&fnic->fip_frame_queue); fnic_fcoe_reset_vlans(fnic); - fnic_fcoe_evlist_free(fnic); } - /* - * Log off the fabric. This stops all remote ports, dns port, - * logs off the fabric. 
This flushes all rport, disc, lport work - * before returning - */ - fc_fabric_logoff(fnic->lport); - - spin_lock_irqsave(&fnic->fnic_lock, flags); - fnic->in_remove = 1; - spin_unlock_irqrestore(&fnic->fnic_lock, flags); + if ((fnic_fdmi_support == 1) && (fnic->iport.fabric.fdmi_pending > 0)) + del_timer_sync(&fnic->iport.fabric.fdmi_timer); - fcoe_ctlr_destroy(&fnic->ctlr); - fc_lport_destroy(lp); fnic_stats_debugfs_remove(fnic); /* @@ -1063,18 +1174,13 @@ static void fnic_remove(struct pci_dev *pdev) */ fnic_cleanup(fnic); - BUG_ON(!skb_queue_empty(&fnic->frame_queue)); - BUG_ON(!skb_queue_empty(&fnic->tx_queue)); - spin_lock_irqsave(&fnic_list_lock, flags); list_del(&fnic->list); spin_unlock_irqrestore(&fnic_list_lock, flags); - fc_remove_host(fnic->lport->host); - scsi_remove_host(fnic->lport->host); - for (hwq = 0; hwq < fnic->wq_copy_count; hwq++) - kfree(fnic->sw_copy_wq[hwq].io_req_table); - fc_exch_mgr_free(fnic->lport); + fnic_free_txq(&fnic->frame_queue); + fnic_free_txq(&fnic->tx_queue); + vnic_dev_notify_unset(fnic->vdev); fnic_free_intr(fnic); fnic_free_vnic_resources(fnic); @@ -1084,8 +1190,11 @@ static void fnic_remove(struct pci_dev *pdev) fnic_iounmap(fnic); pci_release_regions(pdev); pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); ida_free(&fnic_ida, fnic->fnic_num); - scsi_host_put(lp->host); + fnic_scsi_unload_cleanup(fnic); + scsi_host_put(fnic->host); + kfree(fnic); } static struct pci_driver fnic_driver = { @@ -1161,6 +1270,24 @@ static int __init fnic_init_module(void) goto err_create_fnic_ioreq_slab; } + fdls_frame_cache = kmem_cache_create("fdls_frames", + FNIC_FCOE_FRAME_MAXSZ, + 0, SLAB_HWCACHE_ALIGN, NULL); + if (!fdls_frame_cache) { + pr_err("fnic fdls frame cache create failed\n"); + err = -ENOMEM; + goto err_create_fdls_frame_cache; + } + + fdls_frame_elem_cache = kmem_cache_create("fdls_frame_elem", + sizeof(struct fnic_frame_list), + 0, SLAB_HWCACHE_ALIGN, NULL); + if (!fdls_frame_elem_cache) { + pr_err("fnic fdls frame elem cache create failed\n"); + err = -ENOMEM; + goto err_create_fdls_frame_cache_elem; + } + fnic_event_queue = alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, "fnic_event_wq"); if (!fnic_event_queue) { @@ -1177,6 +1304,19 @@ static int __init fnic_init_module(void) goto err_create_fip_workq; } + if (pc_rscn_handling_feature_flag == PC_RSCN_HANDLING_FEATURE_ON) { + reset_fnic_work_queue = + create_singlethread_workqueue("reset_fnic_work_queue"); + if (!reset_fnic_work_queue) { + pr_err("reset fnic work queue create failed\n"); + err = -ENOMEM; + goto err_create_reset_fnic_workq; + } + spin_lock_init(&reset_fnic_list_lock); + INIT_LIST_HEAD(&reset_fnic_list); + INIT_WORK(&reset_fnic_work, fnic_reset_work_handler); + } + fnic_fc_transport = fc_attach_transport(&fnic_fc_functions); if (!fnic_fc_transport) { printk(KERN_ERR PFX "fc_attach_transport error\n"); @@ -1197,8 +1337,15 @@ err_pci_register: err_fc_transport: destroy_workqueue(fnic_fip_queue); err_create_fip_workq: + if (pc_rscn_handling_feature_flag == PC_RSCN_HANDLING_FEATURE_ON) + destroy_workqueue(reset_fnic_work_queue); +err_create_reset_fnic_workq: destroy_workqueue(fnic_event_queue); err_create_fnic_workq: + kmem_cache_destroy(fdls_frame_elem_cache); +err_create_fdls_frame_cache_elem: + kmem_cache_destroy(fdls_frame_cache); +err_create_fdls_frame_cache: kmem_cache_destroy(fnic_io_req_cache); err_create_fnic_ioreq_slab: kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]); @@ -1215,11 +1362,18 @@ static void __exit fnic_cleanup_module(void) { 
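fnic_init_module() above acquires its caches, workqueues and the FC transport in order and, on failure, unwinds through the goto ladder in exact reverse order; fnic_cleanup_module() below mirrors the same teardown for the success path. A compact, self-contained sketch of the ladder with stubbed resources (names are illustrative, not driver code):

/* Stub acquire/release pair standing in for kmem_cache_create(),
 * alloc_ordered_workqueue(), fc_attach_transport(), etc.
 */
static void *acquire(void) { static int token; return &token; }
static void release(void *res) { (void)res; }

static int example_init(void)
{
	void *cache, *wq;

	cache = acquire();		/* first resource */
	if (!cache)
		return -1;

	wq = acquire();			/* second resource */
	if (!wq)
		goto err_free_cache;	/* undo only what already succeeded */

	return 0;

err_free_cache:
	release(cache);
	return -1;
}
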
pci_unregister_driver(&fnic_driver); destroy_workqueue(fnic_event_queue); - if (fnic_fip_queue) + + if (pc_rscn_handling_feature_flag == PC_RSCN_HANDLING_FEATURE_ON) + destroy_workqueue(reset_fnic_work_queue); + + if (fnic_fip_queue) { + flush_workqueue(fnic_fip_queue); destroy_workqueue(fnic_fip_queue); + } kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]); kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]); kmem_cache_destroy(fnic_io_req_cache); + kmem_cache_destroy(fdls_frame_cache); fc_release_transport(fnic_fc_transport); fnic_trace_free(); fnic_fc_trace_free(); diff --git a/drivers/scsi/fnic/fnic_pci_subsys_devid.c b/drivers/scsi/fnic/fnic_pci_subsys_devid.c new file mode 100644 index 000000000000..36a2c1268422 --- /dev/null +++ b/drivers/scsi/fnic/fnic_pci_subsys_devid.c @@ -0,0 +1,131 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + */ + +#include <linux/module.h> +#include <linux/mempool.h> +#include <linux/string.h> +#include <linux/slab.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/spinlock.h> +#include <linux/workqueue.h> +#include <linux/kthread.h> +#include <linux/if_ether.h> +#include "fnic.h" + +static struct fnic_pcie_device fnic_pcie_device_table[] = { + {PCI_DEVICE_ID_CISCO_SERENO, "Sereno", PCI_SUBDEVICE_ID_CISCO_VASONA, + "VIC 1280"}, + {PCI_DEVICE_ID_CISCO_SERENO, "Sereno", PCI_SUBDEVICE_ID_CISCO_COTATI, + "VIC 1240"}, + {PCI_DEVICE_ID_CISCO_SERENO, "Sereno", + PCI_SUBDEVICE_ID_CISCO_LEXINGTON, "VIC 1225"}, + {PCI_DEVICE_ID_CISCO_SERENO, "Sereno", PCI_SUBDEVICE_ID_CISCO_ICEHOUSE, + "VIC 1285"}, + {PCI_DEVICE_ID_CISCO_SERENO, "Sereno", + PCI_SUBDEVICE_ID_CISCO_KIRKWOODLAKE, "VIC 1225T"}, + {PCI_DEVICE_ID_CISCO_SERENO, "Sereno", + PCI_SUBDEVICE_ID_CISCO_SUSANVILLE, "VIC 1227"}, + {PCI_DEVICE_ID_CISCO_SERENO, "Sereno", PCI_SUBDEVICE_ID_CISCO_TORRANCE, + "VIC 1227T"}, + + {PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_CALISTOGA, + "VIC 1340"}, + {PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_MOUNTAINVIEW, + "VIC 1380"}, + {PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_MOUNTTIAN, + "C3260-SIOC"}, + {PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_CLEARLAKE, + "VIC 1385"}, + {PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_MOUNTTIAN2, + "C3260-SIOC"}, + {PCI_DEVICE_ID_CISCO_CRUZ, "Cruz", PCI_SUBDEVICE_ID_CISCO_CLAREMONT, + "VIC 1387"}, + + {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BRADBURY, + "VIC 1457"}, + {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", + PCI_SUBDEVICE_ID_CISCO_BRENTWOOD, "VIC 1455"}, + {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", + PCI_SUBDEVICE_ID_CISCO_BURLINGAME, "VIC 1487"}, + {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BAYSIDE, + "VIC 1485"}, + {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", + PCI_SUBDEVICE_ID_CISCO_BAKERSFIELD, "VIC 1440"}, + {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", + PCI_SUBDEVICE_ID_CISCO_BOONVILLE, "VIC 1480"}, + {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BENICIA, + "VIC 1495"}, + {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BEAUMONT, + "VIC 1497"}, + {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BRISBANE, + "VIC 1467"}, + {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", PCI_SUBDEVICE_ID_CISCO_BENTON, + "VIC 1477"}, + {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", + PCI_SUBDEVICE_ID_CISCO_TWIN_RIVER, 
"VIC 14425"}, + {PCI_DEVICE_ID_CISCO_BODEGA, "Bodega", + PCI_SUBDEVICE_ID_CISCO_TWIN_PEAK, "VIC 14825"}, + + {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_BERN, + "VIC 15420"}, + {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", + PCI_SUBDEVICE_ID_CISCO_STOCKHOLM, "VIC 15428"}, + {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_KRAKOW, + "VIC 15411"}, + {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", + PCI_SUBDEVICE_ID_CISCO_LUCERNE, "VIC 15231"}, + {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_TURKU, + "VIC 15238"}, + {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_GENEVA, + "VIC 15422"}, + {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", + PCI_SUBDEVICE_ID_CISCO_HELSINKI, "VIC 15235"}, + {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", + PCI_SUBDEVICE_ID_CISCO_GOTHENBURG, "VIC 15425"}, + {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", + PCI_SUBDEVICE_ID_CISCO_TURKU_PLUS, "VIC 15237"}, + {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_ZURICH, + "VIC 15230"}, + {PCI_DEVICE_ID_CISCO_BEVERLY, "Beverly", PCI_SUBDEVICE_ID_CISCO_RIGA, + "VIC 15427"}, + + {0,} +}; + +int fnic_get_desc_by_devid(struct pci_dev *pdev, char **desc, + char **subsys_desc) +{ + unsigned short device = PCI_DEVICE_ID_CISCO_VIC_FC; + int max = ARRAY_SIZE(fnic_pcie_device_table); + struct fnic_pcie_device *t = fnic_pcie_device_table; + int index = 0; + + if (pdev->device != device) + return 1; + + while (t->device != 0) { + if (memcmp + ((char *) &pdev->subsystem_device, + (char *) &t->subsystem_device, sizeof(short)) == 0) + break; + t++; + index++; + } + + if (index >= max - 1) { + *desc = NULL; + *subsys_desc = NULL; + return 1; + } + + *desc = fnic_pcie_device_table[index].desc; + *subsys_desc = fnic_pcie_device_table[index].subsys_desc; + return 0; +} diff --git a/drivers/scsi/fnic/fnic_res.c b/drivers/scsi/fnic/fnic_res.c index 33dd27f6f24e..763475587b7f 100644 --- a/drivers/scsi/fnic/fnic_res.c +++ b/drivers/scsi/fnic/fnic_res.c @@ -30,9 +30,7 @@ int fnic_get_vnic_config(struct fnic *fnic) offsetof(struct vnic_fc_config, m), \ sizeof(c->m), &c->m); \ if (err) { \ - shost_printk(KERN_ERR, fnic->lport->host, \ - "Error getting %s, %d\n", #m, \ - err); \ + dev_err(&fnic->pdev->dev, "Error getting %s, %d\n", #m, err); \ return err; \ } \ } while (0); @@ -60,6 +58,11 @@ int fnic_get_vnic_config(struct fnic *fnic) GET_CONFIG(intr_mode); GET_CONFIG(wq_copy_count); + if ((c->flags & (VFCF_FC_INITIATOR)) == 0) { + dev_info(&fnic->pdev->dev, "vNIC role not defined (def role: FC Init)\n"); + c->flags |= VFCF_FC_INITIATOR; + } + c->wq_enet_desc_count = min_t(u32, VNIC_FNIC_WQ_DESCS_MAX, max_t(u32, VNIC_FNIC_WQ_DESCS_MIN, @@ -139,40 +142,28 @@ int fnic_get_vnic_config(struct fnic *fnic) c->wq_copy_count = min_t(u16, FNIC_WQ_COPY_MAX, c->wq_copy_count); - shost_printk(KERN_INFO, fnic->lport->host, - "vNIC MAC addr %pM " - "wq/wq_copy/rq %d/%d/%d\n", - fnic->ctlr.ctl_src_addr, + dev_info(&fnic->pdev->dev, "fNIC MAC addr %p wq/wq_copy/rq %d/%d/%d\n", + fnic->data_src_addr, c->wq_enet_desc_count, c->wq_copy_desc_count, c->rq_desc_count); - shost_printk(KERN_INFO, fnic->lport->host, - "vNIC node wwn %llx port wwn %llx\n", + dev_info(&fnic->pdev->dev, "fNIC node wwn 0x%llx port wwn 0x%llx\n", c->node_wwn, c->port_wwn); - shost_printk(KERN_INFO, fnic->lport->host, - "vNIC ed_tov %d ra_tov %d\n", + dev_info(&fnic->pdev->dev, "fNIC ed_tov %d ra_tov %d\n", c->ed_tov, c->ra_tov); - shost_printk(KERN_INFO, fnic->lport->host, - "vNIC mtu %d intr timer %d\n", + dev_info(&fnic->pdev->dev, "fNIC mtu 
%d intr timer %d\n", c->maxdatafieldsize, c->intr_timer); - shost_printk(KERN_INFO, fnic->lport->host, - "vNIC flags 0x%x luns per tgt %d\n", + dev_info(&fnic->pdev->dev, "fNIC flags 0x%x luns per tgt %d\n", c->flags, c->luns_per_tgt); - shost_printk(KERN_INFO, fnic->lport->host, - "vNIC flogi_retries %d flogi timeout %d\n", + dev_info(&fnic->pdev->dev, "fNIC flogi_retries %d flogi timeout %d\n", c->flogi_retries, c->flogi_timeout); - shost_printk(KERN_INFO, fnic->lport->host, - "vNIC plogi retries %d plogi timeout %d\n", + dev_info(&fnic->pdev->dev, "fNIC plogi retries %d plogi timeout %d\n", c->plogi_retries, c->plogi_timeout); - shost_printk(KERN_INFO, fnic->lport->host, - "vNIC io throttle count %d link dn timeout %d\n", + dev_info(&fnic->pdev->dev, "fNIC io throttle count %d link dn timeout %d\n", c->io_throttle_count, c->link_down_timeout); - shost_printk(KERN_INFO, fnic->lport->host, - "vNIC port dn io retries %d port dn timeout %d\n", + dev_info(&fnic->pdev->dev, "fNIC port dn io retries %d port dn timeout %d\n", c->port_down_io_retries, c->port_down_timeout); - shost_printk(KERN_INFO, fnic->lport->host, - "vNIC wq_copy_count: %d\n", c->wq_copy_count); - shost_printk(KERN_INFO, fnic->lport->host, - "vNIC intr mode: %d\n", c->intr_mode); + dev_info(&fnic->pdev->dev, "fNIC wq_copy_count: %d\n", c->wq_copy_count); + dev_info(&fnic->pdev->dev, "fNIC intr mode: %d\n", c->intr_mode); return 0; } @@ -206,18 +197,12 @@ void fnic_get_res_counts(struct fnic *fnic) fnic->intr_count = vnic_dev_get_res_count(fnic->vdev, RES_TYPE_INTR_CTRL); - shost_printk(KERN_INFO, fnic->lport->host, - "vNIC fw resources wq_count: %d\n", fnic->wq_count); - shost_printk(KERN_INFO, fnic->lport->host, - "vNIC fw resources raw_wq_count: %d\n", fnic->raw_wq_count); - shost_printk(KERN_INFO, fnic->lport->host, - "vNIC fw resources wq_copy_count: %d\n", fnic->wq_copy_count); - shost_printk(KERN_INFO, fnic->lport->host, - "vNIC fw resources rq_count: %d\n", fnic->rq_count); - shost_printk(KERN_INFO, fnic->lport->host, - "vNIC fw resources cq_count: %d\n", fnic->cq_count); - shost_printk(KERN_INFO, fnic->lport->host, - "vNIC fw resources intr_count: %d\n", fnic->intr_count); + dev_info(&fnic->pdev->dev, "vNIC fw resources wq_count: %d\n", fnic->wq_count); + dev_info(&fnic->pdev->dev, "vNIC fw resources raw_wq_count: %d\n", fnic->raw_wq_count); + dev_info(&fnic->pdev->dev, "vNIC fw resources wq_copy_count: %d\n", fnic->wq_copy_count); + dev_info(&fnic->pdev->dev, "vNIC fw resources rq_count: %d\n", fnic->rq_count); + dev_info(&fnic->pdev->dev, "vNIC fw resources cq_count: %d\n", fnic->cq_count); + dev_info(&fnic->pdev->dev, "vNIC fw resources intr_count: %d\n", fnic->intr_count); } void fnic_free_vnic_resources(struct fnic *fnic) @@ -253,19 +238,17 @@ int fnic_alloc_vnic_resources(struct fnic *fnic) intr_mode = vnic_dev_get_intr_mode(fnic->vdev); - shost_printk(KERN_INFO, fnic->lport->host, "vNIC interrupt mode: %s\n", + dev_info(&fnic->pdev->dev, "vNIC interrupt mode: %s\n", intr_mode == VNIC_DEV_INTR_MODE_INTX ? "legacy PCI INTx" : intr_mode == VNIC_DEV_INTR_MODE_MSI ? "MSI" : intr_mode == VNIC_DEV_INTR_MODE_MSIX ? 
"MSI-X" : "unknown"); - shost_printk(KERN_INFO, fnic->lport->host, - "vNIC resources avail: wq %d cp_wq %d raw_wq %d rq %d", + dev_info(&fnic->pdev->dev, "res avail: wq %d cp_wq %d raw_wq %d rq %d", fnic->wq_count, fnic->wq_copy_count, fnic->raw_wq_count, fnic->rq_count); - shost_printk(KERN_INFO, fnic->lport->host, - "vNIC resources avail: cq %d intr %d cpy-wq desc count %d\n", + dev_info(&fnic->pdev->dev, "res avail: cq %d intr %d cpy-wq desc count %d\n", fnic->cq_count, fnic->intr_count, fnic->config.wq_copy_desc_count); @@ -340,8 +323,7 @@ int fnic_alloc_vnic_resources(struct fnic *fnic) RES_TYPE_INTR_PBA_LEGACY, 0); if (!fnic->legacy_pba && intr_mode == VNIC_DEV_INTR_MODE_INTX) { - shost_printk(KERN_ERR, fnic->lport->host, - "Failed to hook legacy pba resource\n"); + dev_err(&fnic->pdev->dev, "Failed to hook legacy pba resource\n"); err = -ENODEV; goto err_out_cleanup; } @@ -444,8 +426,7 @@ int fnic_alloc_vnic_resources(struct fnic *fnic) /* init the stats memory by making the first call here */ err = vnic_dev_stats_dump(fnic->vdev, &fnic->stats); if (err) { - shost_printk(KERN_ERR, fnic->lport->host, - "vnic_dev_stats_dump failed - x%x\n", err); + dev_err(&fnic->pdev->dev, "vnic_dev_stats_dump failed - x%x\n", err); goto err_out_cleanup; } diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index 2ba61dba4569..7133b254cbe4 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c @@ -23,11 +23,13 @@ #include <scsi/scsi_tcq.h> #include <scsi/fc/fc_els.h> #include <scsi/fc/fc_fcoe.h> -#include <scsi/libfc.h> #include <scsi/fc_frame.h> +#include <scsi/scsi_transport_fc.h> #include "fnic_io.h" #include "fnic.h" +static void fnic_cleanup_io(struct fnic *fnic, int exclude_id); + const char *fnic_state_str[] = { [FNIC_IN_FC_MODE] = "FNIC_IN_FC_MODE", [FNIC_IN_FC_TRANS_ETH_MODE] = "FNIC_IN_FC_TRANS_ETH_MODE", @@ -65,6 +67,18 @@ static const char *fcpio_status_str[] = { [FCPIO_LUNMAP_CHNG_PEND] = "FCPIO_LUNHMAP_CHNG_PEND", }; +enum terminate_io_return { + TERM_SUCCESS = 0, + TERM_NO_SC = 1, + TERM_IO_REQ_NOT_FOUND, + TERM_ANOTHER_PORT, + TERM_GSTATE, + TERM_IO_BLOCKED, + TERM_OUT_OF_WQ_DESC, + TERM_TIMED_OUT, + TERM_MISC, +}; + const char *fnic_state_to_str(unsigned int state) { if (state >= ARRAY_SIZE(fnic_state_str) || !fnic_state_str[state]) @@ -90,8 +104,6 @@ static const char *fnic_fcpio_status_to_str(unsigned int status) return fcpio_status_str[status]; } -static void fnic_cleanup_io(struct fnic *fnic); - /* * Unmap the data buffer and sense buffer for an io_req, * also unmap and free the device-private scatter/gather list. 
@@ -114,6 +126,65 @@ static void fnic_release_ioreq_buf(struct fnic *fnic, SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); } +static bool +fnic_count_portid_ioreqs_iter(struct fnic *fnic, struct scsi_cmnd *sc, + void *data1, void *data2) +{ + u32 *portid = data1; + unsigned int *count = data2; + struct fnic_io_req *io_req = fnic_priv(sc)->io_req; + + if (!io_req || (*portid && (io_req->port_id != *portid))) + return true; + + *count += 1; + return true; +} + +unsigned int fnic_count_ioreqs(struct fnic *fnic, u32 portid) +{ + unsigned int count = 0; + + fnic_scsi_io_iter(fnic, fnic_count_portid_ioreqs_iter, + &portid, &count); + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "portid = 0x%x count = %u\n", portid, count); + return count; +} + +unsigned int fnic_count_all_ioreqs(struct fnic *fnic) +{ + return fnic_count_ioreqs(fnic, 0); +} + +static bool +fnic_count_lun_ioreqs_iter(struct fnic *fnic, struct scsi_cmnd *sc, + void *data1, void *data2) +{ + struct scsi_device *scsi_device = data1; + unsigned int *count = data2; + + if (sc->device != scsi_device || !fnic_priv(sc)->io_req) + return true; + + *count += 1; + return true; +} + +unsigned int +fnic_count_lun_ioreqs(struct fnic *fnic, struct scsi_device *scsi_device) +{ + unsigned int count = 0; + + fnic_scsi_io_iter(fnic, fnic_count_lun_ioreqs_iter, + scsi_device, &count); + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "lun = %p count = %u\n", scsi_device, count); + return count; +} + /* Free up Copy Wq descriptors. Called with copy_wq lock held */ static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq, unsigned int hwq) { @@ -179,12 +250,11 @@ int fnic_fw_reset_handler(struct fnic *fnic) struct vnic_wq_copy *wq = &fnic->hw_copy_wq[0]; int ret = 0; unsigned long flags; + unsigned int ioreq_count; /* indicate fwreset to io path */ fnic_set_state_flags(fnic, FNIC_FLAGS_FWRESET); - - skb_queue_purge(&fnic->frame_queue); - skb_queue_purge(&fnic->tx_queue); + ioreq_count = fnic_count_all_ioreqs(fnic); /* wait for io cmpl */ while (atomic_read(&fnic->in_flight)) @@ -198,6 +268,8 @@ int fnic_fw_reset_handler(struct fnic *fnic) if (!vnic_wq_copy_desc_avail(wq)) ret = -EAGAIN; else { + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "ioreq_count: %u\n", ioreq_count); fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG); atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) > @@ -211,11 +283,11 @@ int fnic_fw_reset_handler(struct fnic *fnic) if (!ret) { atomic64_inc(&fnic->fnic_stats.reset_stats.fw_resets); - FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, "Issued fw reset\n"); } else { fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET); - FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "Failed to issue fw reset\n"); } @@ -231,10 +303,10 @@ int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id) { struct vnic_wq_copy *wq = &fnic->hw_copy_wq[0]; enum fcpio_flogi_reg_format_type format; - struct fc_lport *lp = fnic->lport; u8 gw_mac[ETH_ALEN]; int ret = 0; unsigned long flags; + struct fnic_iport_s *iport = &fnic->iport; spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); @@ -246,28 +318,23 @@ int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id) goto flogi_reg_ioreq_end; } - if (fnic->ctlr.map_dest) { - eth_broadcast_addr(gw_mac); - format = FCPIO_FLOGI_REG_DEF_DEST; - } else { - memcpy(gw_mac, fnic->ctlr.dest_addr, 
ETH_ALEN); - format = FCPIO_FLOGI_REG_GW_DEST; - } + memcpy(gw_mac, fnic->iport.fcfmac, ETH_ALEN); + format = FCPIO_FLOGI_REG_GW_DEST; - if ((fnic->config.flags & VFCF_FIP_CAPABLE) && !fnic->ctlr.map_dest) { + if (fnic->config.flags & VFCF_FIP_CAPABLE) { fnic_queue_wq_copy_desc_fip_reg(wq, SCSI_NO_TAG, fc_id, gw_mac, - fnic->data_src_addr, - lp->r_a_tov, lp->e_d_tov); - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, - "FLOGI FIP reg issued fcid %x src %pM dest %pM\n", - fc_id, fnic->data_src_addr, gw_mac); + fnic->iport.fpma, + iport->r_a_tov, iport->e_d_tov); + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FLOGI FIP reg issued fcid: 0x%x src %p dest %p\n", + fc_id, fnic->iport.fpma, gw_mac); } else { fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG, format, fc_id, gw_mac); - FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, - "FLOGI reg issued fcid 0x%x map %d dest 0x%p\n", - fc_id, fnic->ctlr.map_dest, gw_mac); + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FLOGI reg issued fcid 0x%x dest %p\n", + fc_id, gw_mac); } atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); @@ -295,13 +362,17 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic, { struct scatterlist *sg; struct fc_rport *rport = starget_to_rport(scsi_target(sc->device)); - struct fc_rport_libfc_priv *rp = rport->dd_data; struct host_sg_desc *desc; struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; unsigned int i; int flags; u8 exch_flags; struct scsi_lun fc_lun; + struct fnic_tport_s *tport; + struct rport_dd_data_s *rdd_data; + + rdd_data = rport->dd_data; + tport = rdd_data->tport; if (sg_count) { /* For each SGE, create a device desc entry */ @@ -342,7 +413,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic, free_wq_copy_descs(fnic, wq, hwq); if (unlikely(!vnic_wq_copy_desc_avail(wq))) { - FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, "fnic_queue_wq_copy_desc failure - no descriptors\n"); atomic64_inc(&misc_stats->io_cpwq_alloc_failures); return SCSI_MLQUEUE_HOST_BUSY; @@ -356,7 +427,7 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic, exch_flags = 0; if ((fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) && - (rp->flags & FC_RP_FLAGS_RETRY)) + (tport->tgt_flags & FDLS_FC_RP_FLAGS_RETRY)) exch_flags |= FCPIO_ICMND_SRFLAG_RETRY; fnic_queue_wq_copy_desc_icmnd_16(wq, mqtag, @@ -371,8 +442,8 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic, sc->cmnd, sc->cmd_len, scsi_bufflen(sc), fc_lun.scsi_lun, io_req->port_id, - rport->maxframe_size, rp->r_a_tov, - rp->e_d_tov); + tport->max_payload_size, + tport->r_a_tov, tport->e_d_tov); atomic64_inc(&fnic->fnic_stats.fw_stats.active_fw_reqs); if (atomic64_read(&fnic->fnic_stats.fw_stats.active_fw_reqs) > @@ -388,10 +459,10 @@ int fnic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc) struct request *const rq = scsi_cmd_to_rq(sc); uint32_t mqtag = 0; void (*done)(struct scsi_cmnd *) = scsi_done; - struct fc_lport *lp = shost_priv(sc->device->host); struct fc_rport *rport; struct fnic_io_req *io_req = NULL; - struct fnic *fnic = lport_priv(lp); + struct fnic *fnic = *((struct fnic **) shost_priv(sc->device->host)); + struct fnic_iport_s *iport = NULL; struct fnic_stats *fnic_stats = &fnic->fnic_stats; struct vnic_wq_copy *wq; int ret = 1; @@ -400,32 +471,14 @@ int fnic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc) unsigned long flags = 0; unsigned long ptr; int io_lock_acquired = 0; - struct 
fc_rport_libfc_priv *rp; uint16_t hwq = 0; - - mqtag = blk_mq_unique_tag(rq); - spin_lock_irqsave(&fnic->fnic_lock, flags); - - if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) { - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, - "fnic IO blocked flags: 0x%lx. Returning SCSI_MLQUEUE_HOST_BUSY\n", - fnic->state_flags); - return SCSI_MLQUEUE_HOST_BUSY; - } - - if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET))) { - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, - "fnic flags: 0x%lx. Returning SCSI_MLQUEUE_HOST_BUSY\n", - fnic->state_flags); - return SCSI_MLQUEUE_HOST_BUSY; - } + struct fnic_tport_s *tport = NULL; + struct rport_dd_data_s *rdd_data; + uint16_t lun0_delay = 0; rport = starget_to_rport(scsi_target(sc->device)); if (!rport) { - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "returning DID_NO_CONNECT for IO as rport is NULL\n"); sc->result = DID_NO_CONNECT << 16; done(sc); @@ -434,50 +487,96 @@ int fnic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc) ret = fc_remote_port_chkready(rport); if (ret) { - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "rport is not ready\n"); - atomic64_inc(&fnic_stats->misc_stats.rport_not_ready); + atomic64_inc(&fnic_stats->misc_stats.tport_not_ready); sc->result = ret; done(sc); return 0; } - rp = rport->dd_data; - if (!rp || rp->rp_state == RPORT_ST_DELETE) { - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, - "rport 0x%x removed, returning DID_NO_CONNECT\n", - rport->port_id); + mqtag = blk_mq_unique_tag(rq); + spin_lock_irqsave(&fnic->fnic_lock, flags); + iport = &fnic->iport; - atomic64_inc(&fnic_stats->misc_stats.rport_not_ready); - sc->result = DID_NO_CONNECT<<16; + if (iport->state != FNIC_IPORT_STATE_READY) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "returning DID_NO_CONNECT for IO as iport state: %d\n", + iport->state); + sc->result = DID_NO_CONNECT << 16; done(sc); return 0; } - if (rp->rp_state != RPORT_ST_READY) { - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, - "rport 0x%x in state 0x%x, returning DID_IMM_RETRY\n", - rport->port_id, rp->rp_state); + /* fc_remote_port_add() may have added the tport to + * fc_transport but dd_data not yet set + */ + rdd_data = rport->dd_data; + tport = rdd_data->tport; + if (!tport || (rdd_data->iport != iport)) { + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "dd_data not yet set in SCSI for rport portid: 0x%x\n", + rport->port_id); + tport = fnic_find_tport_by_fcid(iport, rport->port_id); + if (!tport) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "returning DID_BUS_BUSY for IO as tport not found for: 0x%x\n", + rport->port_id); + sc->result = DID_BUS_BUSY << 16; + done(sc); + return 0; + } + + /* Re-assign same params as in fnic_fdls_add_tport */ + rport->maxframe_size = FNIC_FC_MAX_PAYLOAD_LEN; + rport->supported_classes = + FC_COS_CLASS3 | FC_RPORT_ROLE_FCP_TARGET; + /* the dd_data is allocated by fctransport of size 
dd_fcrport_size */ + rdd_data = rport->dd_data; + rdd_data->tport = tport; + rdd_data->iport = iport; + tport->rport = rport; + tport->flags |= FNIC_FDLS_SCSI_REGISTERED; + } - sc->result = DID_IMM_RETRY << 16; + if ((tport->state != FDLS_TGT_STATE_READY) + && (tport->state != FDLS_TGT_STATE_ADISC)) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "returning DID_NO_CONNECT for IO as tport state: %d\n", + tport->state); + sc->result = DID_NO_CONNECT << 16; done(sc); return 0; } - if (lp->state != LPORT_ST_READY || !(lp->link_up)) { + atomic_inc(&fnic->in_flight); + atomic_inc(&tport->in_flight); + + if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) { + atomic_dec(&fnic->in_flight); + atomic_dec(&tport->in_flight); spin_unlock_irqrestore(&fnic->fnic_lock, flags); - FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, - "state not ready: %d/link not up: %d Returning HOST_BUSY\n", - lp->state, lp->link_up); return SCSI_MLQUEUE_HOST_BUSY; } - atomic_inc(&fnic->in_flight); + if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET))) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "fnic flags FW reset: 0x%lx. Returning SCSI_MLQUEUE_HOST_BUSY\n", + fnic->state_flags); + return SCSI_MLQUEUE_HOST_BUSY; + } + + if (!tport->lun0_delay) { + lun0_delay = 1; + tport->lun0_delay++; + } spin_unlock_irqrestore(&fnic->fnic_lock, flags); + fnic_priv(sc)->state = FNIC_IOREQ_NOT_INITED; fnic_priv(sc)->flags = FNIC_NO_FLAGS; @@ -499,6 +598,7 @@ int fnic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc) goto out; } + io_req->tport = tport; /* Determine the type of scatter/gather list we need */ io_req->sgl_cnt = sg_count; io_req->sgl_type = FNIC_SGL_CACHE_DFLT; @@ -575,6 +675,7 @@ int fnic_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc) mempool_free(io_req, fnic->io_req_pool); } atomic_dec(&fnic->in_flight); + atomic_dec(&tport->in_flight); return ret; } else { atomic64_inc(&fnic_stats->io_stats.active_ios); @@ -602,6 +703,14 @@ out: spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); atomic_dec(&fnic->in_flight); + atomic_dec(&tport->in_flight); + + if (lun0_delay) { + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "LUN0 delay\n"); + mdelay(LUN0_DELAY_TIME); + } + return ret; } @@ -625,7 +734,7 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic, atomic64_inc(&reset_stats->fw_reset_completions); /* Clean up all outstanding io requests */ - fnic_cleanup_io(fnic); + fnic_cleanup_io(fnic, SCSI_NO_TAG); atomic64_set(&fnic->fnic_stats.fw_stats.active_fw_reqs, 0); atomic64_set(&fnic->fnic_stats.io_stats.active_ios, 0); @@ -637,44 +746,37 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic, if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) { /* Check status of reset completion */ if (!hdr_status) { - FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, "reset cmpl success\n"); /* Ready to send flogi out */ fnic->state = FNIC_IN_ETH_MODE; } else { - FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "reset failed with header status: %s\n", fnic_fcpio_status_to_str(hdr_status)); - /* - * Unable to change to eth mode, cannot send out flogi - * Change state to fc mode, so that subsequent Flogi - * requests from libFC will cause more attempts to - * reset the firmware. 
Free the cached flogi - */ fnic->state = FNIC_IN_FC_MODE; atomic64_inc(&reset_stats->fw_reset_failures); ret = -1; } } else { - FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "Unexpected state while processing reset completion: %s\n", fnic_state_to_str(fnic->state)); atomic64_inc(&reset_stats->fw_reset_failures); ret = -1; } - /* Thread removing device blocks till firmware reset is complete */ - if (fnic->remove_wait) - complete(fnic->remove_wait); + if (fnic->fw_reset_done) + complete(fnic->fw_reset_done); /* * If fnic is being removed, or fw reset failed * free the flogi frame. Else, send it out */ - if (fnic->remove_wait || ret) { + if (ret) { spin_unlock_irqrestore(&fnic->fnic_lock, flags); - skb_queue_purge(&fnic->tx_queue); + fnic_free_txq(&fnic->tx_queue); goto reset_cmpl_handler_end; } @@ -710,19 +812,19 @@ static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic, /* Check flogi registration completion status */ if (!hdr_status) { - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, - "flog reg succeeded\n"); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "FLOGI reg succeeded\n"); fnic->state = FNIC_IN_FC_MODE; } else { FNIC_SCSI_DBG(KERN_DEBUG, - fnic->lport->host, fnic->fnic_num, - "fnic flogi reg :failed %s\n", + fnic->host, fnic->fnic_num, + "fnic flogi reg failed: %s\n", fnic_fcpio_status_to_str(hdr_status)); fnic->state = FNIC_IN_ETH_MODE; ret = -1; } } else { - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "Unexpected fnic state %s while" " processing flogi reg completion\n", fnic_state_to_str(fnic->state)); @@ -795,7 +897,7 @@ static inline void fnic_fcpio_ack_handler(struct fnic *fnic, spin_unlock_irqrestore(&fnic->wq_copy_lock[wq_index], flags); FNIC_TRACE(fnic_fcpio_ack_handler, - fnic->lport->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3], + fnic->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3], ox_id_tag[4], ox_id_tag[5]); } @@ -833,36 +935,36 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, unsigned int cq_ind hwq = blk_mq_unique_tag_to_hwq(mqtag); if (hwq != cq_index) { - FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ", hwq, mqtag, tag, cq_index); - FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "hdr status: %s icmnd completion on the wrong queue\n", fnic_fcpio_status_to_str(hdr_status)); } if (tag >= fnic->fnic_max_tag_id) { - FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ", hwq, mqtag, tag, cq_index); - FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "hdr status: %s Out of range tag\n", fnic_fcpio_status_to_str(hdr_status)); return; } spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); - sc = scsi_host_find_tag(fnic->lport->host, id); + sc = scsi_host_find_tag(fnic->host, id); WARN_ON_ONCE(!sc); if (!sc) { atomic64_inc(&fnic_stats->io_stats.sc_null); spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); - shost_printk(KERN_ERR, fnic->lport->host, + shost_printk(KERN_ERR, fnic->host, "icmnd_cmpl sc is null - " "hdr status = %s tag = 0x%x desc = 0x%p\n", fnic_fcpio_status_to_str(hdr_status), id, desc); 
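/*
 * Illustration (not part of the commit): the completion handlers in this
 * file repeatedly split a block-mq "unique tag" into a hardware-queue
 * half and a per-queue tag half, then cross-check the queue against the
 * completion-queue index. In the kernel this is blk_mq_unique_tag() and
 * blk_mq_unique_tag_to_hwq(); the stand-alone model below re-creates the
 * same 16/16-bit packing so the hwq/mqtag checks are easy to follow.
 */
#include <stdint.h>
#include <stdio.h>

#define UNIQUE_TAG_BITS 16
#define UNIQUE_TAG_MASK ((1U << UNIQUE_TAG_BITS) - 1)

static uint32_t unique_tag(uint16_t hwq, uint16_t tag)
{
	return ((uint32_t)hwq << UNIQUE_TAG_BITS) | tag;
}

static unsigned int unique_tag_to_hwq(uint32_t mqtag)
{
	return mqtag >> UNIQUE_TAG_BITS;
}

static unsigned int unique_tag_to_tag(uint32_t mqtag)
{
	return mqtag & UNIQUE_TAG_MASK;
}

int main(void)
{
	uint32_t mqtag = unique_tag(3, 0x2a);
	unsigned int cq_index = 3;	/* queue the completion arrived on */

	/* Same sanity check as the icmnd/itmf completion handlers. */
	if (unique_tag_to_hwq(mqtag) != cq_index)
		printf("completion on the wrong queue\n");
	else
		printf("hwq %u tag 0x%x ok\n",
		       unique_tag_to_hwq(mqtag), unique_tag_to_tag(mqtag));
	return 0;
}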
FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler, - fnic->lport->host->host_no, id, + fnic->host->host_no, id, ((u64)icmnd_cmpl->_resvd0[1] << 16 | (u64)icmnd_cmpl->_resvd0[0]), ((u64)hdr_status << 16 | @@ -885,7 +987,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, unsigned int cq_ind atomic64_inc(&fnic_stats->io_stats.ioreq_null); fnic_priv(sc)->flags |= FNIC_IO_REQ_NULL; spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); - shost_printk(KERN_ERR, fnic->lport->host, + shost_printk(KERN_ERR, fnic->host, "icmnd_cmpl io_req is null - " "hdr status = %s tag = 0x%x sc 0x%p\n", fnic_fcpio_status_to_str(hdr_status), id, sc); @@ -912,7 +1014,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, unsigned int cq_ind if(FCPIO_ABORTED == hdr_status) fnic_priv(sc)->flags |= FNIC_IO_ABORTED; - FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, "icmnd_cmpl abts pending " "hdr status = %s tag = 0x%x sc = 0x%p " "scsi_status = %x residual = %d\n", @@ -943,6 +1045,9 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, unsigned int cq_ind if (icmnd_cmpl->scsi_status == SAM_STAT_TASK_SET_FULL) atomic64_inc(&fnic_stats->misc_stats.queue_fulls); + + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "xfer_len: %llu", xfer_len); break; case FCPIO_TIMEOUT: /* request was timed out */ @@ -1004,7 +1109,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, unsigned int cq_ind if (hdr_status != FCPIO_SUCCESS) { atomic64_inc(&fnic_stats->io_stats.io_failures); - shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n", + shost_printk(KERN_ERR, fnic->host, "hdr status = %s\n", fnic_fcpio_status_to_str(hdr_status)); } @@ -1024,13 +1129,13 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, unsigned int cq_ind desc, cmd_trace, fnic_flags_and_state(sc)); if (sc->sc_data_direction == DMA_FROM_DEVICE) { - fnic->lport->host_stats.fcp_input_requests++; + fnic_stats->host_stats.fcp_input_requests++; fnic->fcp_input_bytes += xfer_len; } else if (sc->sc_data_direction == DMA_TO_DEVICE) { - fnic->lport->host_stats.fcp_output_requests++; + fnic_stats->host_stats.fcp_output_requests++; fnic->fcp_output_bytes += xfer_len; } else - fnic->lport->host_stats.fcp_control_requests++; + fnic_stats->host_stats.fcp_control_requests++; /* Call SCSI completion function to complete the IO */ scsi_done(sc); @@ -1097,27 +1202,27 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde hwq = blk_mq_unique_tag_to_hwq(id & FNIC_TAG_MASK); if (hwq != cq_index) { - FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ", hwq, mqtag, tag, cq_index); - FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "hdr status: %s ITMF completion on the wrong queue\n", fnic_fcpio_status_to_str(hdr_status)); } if (tag > fnic->fnic_max_tag_id) { - FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ", hwq, mqtag, tag, cq_index); - FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "hdr status: %s Tag out of range\n", fnic_fcpio_status_to_str(hdr_status)); return; } else if ((tag == fnic->fnic_max_tag_id) && !(id & FNIC_TAG_DEV_RST)) { - FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, 
fnic->fnic_num, + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "hwq: %d mqtag: 0x%x tag: 0x%x cq index: %d ", hwq, mqtag, tag, cq_index); - FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "hdr status: %s Tag out of range\n", fnic_fcpio_status_to_str(hdr_status)); return; @@ -1133,14 +1238,14 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde if (io_req) sc = io_req->sc; } else { - sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK); + sc = scsi_host_find_tag(fnic->host, id & FNIC_TAG_MASK); } WARN_ON_ONCE(!sc); if (!sc) { atomic64_inc(&fnic_stats->io_stats.sc_null); spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); - shost_printk(KERN_ERR, fnic->lport->host, + shost_printk(KERN_ERR, fnic->host, "itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n", fnic_fcpio_status_to_str(hdr_status), tag); return; @@ -1152,7 +1257,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde atomic64_inc(&fnic_stats->io_stats.ioreq_null); spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); fnic_priv(sc)->flags |= FNIC_IO_ABT_TERM_REQ_NULL; - shost_printk(KERN_ERR, fnic->lport->host, + shost_printk(KERN_ERR, fnic->host, "itmf_cmpl io_req is null - " "hdr status = %s tag = 0x%x sc 0x%p\n", fnic_fcpio_status_to_str(hdr_status), tag, sc); @@ -1163,7 +1268,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde if ((id & FNIC_TAG_ABORT) && (id & FNIC_TAG_DEV_RST)) { /* Abort and terminate completion of device reset req */ /* REVISIT : Add asserts about various flags */ - FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, "hwq: %d mqtag: 0x%x tag: 0x%x hst: %s Abt/term completion received\n", hwq, mqtag, tag, fnic_fcpio_status_to_str(hdr_status)); @@ -1175,7 +1280,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); } else if (id & FNIC_TAG_ABORT) { /* Completion of abort cmd */ - shost_printk(KERN_DEBUG, fnic->lport->host, + shost_printk(KERN_DEBUG, fnic->host, "hwq: %d mqtag: 0x%x tag: 0x%x Abort header status: %s\n", hwq, mqtag, tag, fnic_fcpio_status_to_str(hdr_status)); @@ -1190,7 +1295,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde &term_stats->terminate_fw_timeouts); break; case FCPIO_ITMF_REJECTED: - FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, "abort reject recd. id %d\n", (int)(id & FNIC_TAG_MASK)); break; @@ -1225,7 +1330,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde if (!(fnic_priv(sc)->flags & (FNIC_IO_ABORTED | FNIC_IO_DONE))) atomic64_inc(&misc_stats->no_icmnd_itmf_cmpls); - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "abts cmpl recd. 
id %d status %s\n", (int)(id & FNIC_TAG_MASK), fnic_fcpio_status_to_str(hdr_status)); @@ -1238,11 +1343,11 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde if (io_req->abts_done) { complete(io_req->abts_done); spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); - shost_printk(KERN_INFO, fnic->lport->host, + shost_printk(KERN_INFO, fnic->host, "hwq: %d mqtag: 0x%x tag: 0x%x Waking up abort thread\n", hwq, mqtag, tag); } else { - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "hwq: %d mqtag: 0x%x tag: 0x%x hst: %s Completing IO\n", hwq, mqtag, tag, fnic_fcpio_status_to_str(hdr_status)); @@ -1273,7 +1378,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde } } else if (id & FNIC_TAG_DEV_RST) { /* Completion of device reset */ - shost_printk(KERN_INFO, fnic->lport->host, + shost_printk(KERN_INFO, fnic->host, "hwq: %d mqtag: 0x%x tag: 0x%x DR hst: %s\n", hwq, mqtag, tag, fnic_fcpio_status_to_str(hdr_status)); @@ -1285,7 +1390,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde sc->device->host->host_no, id, sc, jiffies_to_msecs(jiffies - start_time), desc, 0, fnic_flags_and_state(sc)); - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "hwq: %d mqtag: 0x%x tag: 0x%x hst: %s Terminate pending\n", hwq, mqtag, tag, fnic_fcpio_status_to_str(hdr_status)); @@ -1298,7 +1403,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde sc->device->host->host_no, id, sc, jiffies_to_msecs(jiffies - start_time), desc, 0, fnic_flags_and_state(sc)); - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "dev reset cmpl recd after time out. 
" "id %d status %s\n", (int)(id & FNIC_TAG_MASK), @@ -1307,7 +1412,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde } fnic_priv(sc)->state = FNIC_IOREQ_CMD_COMPLETE; fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE; - FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, "hwq: %d mqtag: 0x%x tag: 0x%x hst: %s DR completion received\n", hwq, mqtag, tag, fnic_fcpio_status_to_str(hdr_status)); @@ -1316,7 +1421,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, unsigned int cq_inde spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); } else { - shost_printk(KERN_ERR, fnic->lport->host, + shost_printk(KERN_ERR, fnic->host, "%s: Unexpected itmf io state: hwq: %d tag 0x%x %s\n", __func__, hwq, id, fnic_ioreq_state_to_str(fnic_priv(sc)->state)); spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); @@ -1371,7 +1476,7 @@ static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev, break; default: - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "firmware completion type %d\n", desc->hdr.type); break; @@ -1414,8 +1519,8 @@ static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data) struct request *const rq = scsi_cmd_to_rq(sc); struct fnic *fnic = data; struct fnic_io_req *io_req; - unsigned long flags = 0; unsigned long start_time = 0; + unsigned long flags; struct fnic_stats *fnic_stats = &fnic->fnic_stats; uint16_t hwq = 0; int tag; @@ -1432,14 +1537,14 @@ static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data) io_req = fnic_priv(sc)->io_req; if (!io_req) { spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); - FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "hwq: %d mqtag: 0x%x tag: 0x%x flags: 0x%x No ioreq. Returning\n", hwq, mqtag, tag, fnic_priv(sc)->flags); return true; } if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) && - !(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) { + !(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) { /* * We will be here only when FW completes reset * without sending completions for outstanding ios. 
@@ -1449,6 +1554,7 @@ static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data) complete(io_req->dr_done); else if (io_req && io_req->abts_done) complete(io_req->abts_done); + spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); return true; } else if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) { @@ -1458,19 +1564,19 @@ static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data) fnic_priv(sc)->io_req = NULL; io_req->sc = NULL; + start_time = io_req->start_time; spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); /* * If there is a scsi_cmnd associated with this io_req, then * free the corresponding state */ - start_time = io_req->start_time; fnic_release_ioreq_buf(fnic, io_req, sc); mempool_free(io_req, fnic->io_req_pool); sc->result = DID_TRANSPORT_DISRUPTED << 16; - FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, - "mqtag:0x%x tag: 0x%x sc:0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n", + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "mqtag: 0x%x tag: 0x%x sc: 0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n", mqtag, tag, sc, (jiffies - start_time)); if (atomic64_read(&fnic->io_cmpl_skip)) @@ -1479,23 +1585,60 @@ static bool fnic_cleanup_io_iter(struct scsi_cmnd *sc, void *data) atomic64_inc(&fnic_stats->io_stats.io_completions); FNIC_TRACE(fnic_cleanup_io, - sc->device->host->host_no, tag, sc, - jiffies_to_msecs(jiffies - start_time), - 0, ((u64)sc->cmnd[0] << 32 | - (u64)sc->cmnd[2] << 24 | - (u64)sc->cmnd[3] << 16 | - (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), - fnic_flags_and_state(sc)); - + sc->device->host->host_no, tag, sc, + jiffies_to_msecs(jiffies - start_time), + 0, ((u64) sc->cmnd[0] << 32 | + (u64) sc->cmnd[2] << 24 | + (u64) sc->cmnd[3] << 16 | + (u64) sc->cmnd[4] << 8 | sc->cmnd[5]), + (((u64) fnic_priv(sc)->flags << 32) | fnic_priv(sc)-> + state)); + + /* Complete the command to SCSI */ scsi_done(sc); - return true; } -static void fnic_cleanup_io(struct fnic *fnic) +static void fnic_cleanup_io(struct fnic *fnic, int exclude_id) { - scsi_host_busy_iter(fnic->lport->host, - fnic_cleanup_io_iter, fnic); + unsigned int io_count = 0; + unsigned long flags; + struct fnic_io_req *io_req = NULL; + struct scsi_cmnd *sc = NULL; + + io_count = fnic_count_all_ioreqs(fnic); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "Outstanding ioreq count: %d active io count: %lld Waiting\n", + io_count, + atomic64_read(&fnic->fnic_stats.io_stats.active_ios)); + + scsi_host_busy_iter(fnic->host, + fnic_cleanup_io_iter, fnic); + + /* with sg3utils device reset, SC needs to be retrieved from ioreq */ + spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); + io_req = fnic->sw_copy_wq[0].io_req_table[fnic->fnic_max_tag_id]; + if (io_req) { + sc = io_req->sc; + if (sc) { + if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) + && !(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) { + fnic_priv(sc)->flags |= FNIC_DEV_RST_DONE; + if (io_req && io_req->dr_done) + complete(io_req->dr_done); + } + } + } + spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags); + + while ((io_count = fnic_count_all_ioreqs(fnic))) { + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "Outstanding ioreq count: %d active io count: %lld Waiting\n", + io_count, + atomic64_read(&fnic->fnic_stats.io_stats.active_ios)); + + schedule_timeout(msecs_to_jiffies(100)); + } } void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq, @@ -1516,7 +1659,7 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq, if (id >= fnic->fnic_max_tag_id) return; - sc = scsi_host_find_tag(fnic->lport->host, id); 
+ sc = scsi_host_find_tag(fnic->host, id); if (!sc) return; @@ -1545,7 +1688,7 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq, wq_copy_cleanup_scsi_cmd: sc->result = DID_NO_CONNECT << 16; - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, "wq_copy_cleanup_handler:" + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "wq_copy_cleanup_handler:" " DID_NO_CONNECT\n"); FNIC_TRACE(fnic_wq_copy_cleanup_handler, @@ -1567,10 +1710,13 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag, struct vnic_wq_copy *wq = &fnic->hw_copy_wq[hwq]; struct misc_stats *misc_stats = &fnic->fnic_stats.misc_stats; unsigned long flags; + struct fnic_tport_s *tport = io_req->tport; spin_lock_irqsave(&fnic->fnic_lock, flags); if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) { + atomic_dec(&fnic->in_flight); + atomic_dec(&tport->in_flight); spin_unlock_irqrestore(&fnic->fnic_lock, flags); return 1; } else @@ -1585,7 +1731,8 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag, if (!vnic_wq_copy_desc_avail(wq)) { spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); atomic_dec(&fnic->in_flight); - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, + atomic_dec(&tport->in_flight); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "fnic_queue_abort_io_req: failure: no descriptors\n"); atomic64_inc(&misc_stats->abts_cpwq_alloc_failures); return 1; @@ -1619,20 +1766,24 @@ static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data) struct fnic *fnic = iter_data->fnic; int abt_tag = 0; struct fnic_io_req *io_req; - unsigned long flags; struct reset_stats *reset_stats = &fnic->fnic_stats.reset_stats; struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats; struct scsi_lun fc_lun; enum fnic_ioreq_state old_ioreq_state; uint16_t hwq = 0; + unsigned long flags; abt_tag = blk_mq_unique_tag(rq); hwq = blk_mq_unique_tag_to_hwq(abt_tag); - spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); + if (!sc) { + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "sc is NULL abt_tag: 0x%x hwq: %d\n", abt_tag, hwq); + return true; + } + spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); io_req = fnic_priv(sc)->io_req; - if (!io_req || io_req->port_id != iter_data->port_id) { spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); return true; @@ -1640,7 +1791,7 @@ static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data) if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) && !(fnic_priv(sc)->flags & FNIC_DEV_RST_ISSUED)) { - FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "hwq: %d abt_tag: 0x%x flags: 0x%x Device reset is not pending\n", hwq, abt_tag, fnic_priv(sc)->flags); spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); @@ -1655,37 +1806,40 @@ static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data) spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); return true; } + if (io_req->abts_done) { - shost_printk(KERN_ERR, fnic->lport->host, - "fnic_rport_exch_reset: io_req->abts_done is set " - "state is %s\n", + shost_printk(KERN_ERR, fnic->host, + "fnic_rport_exch_reset: io_req->abts_done is set state is %s\n", fnic_ioreq_state_to_str(fnic_priv(sc)->state)); } if (!(fnic_priv(sc)->flags & FNIC_IO_ISSUED)) { - shost_printk(KERN_ERR, fnic->lport->host, - "rport_exch_reset " - "IO not yet issued %p tag 0x%x flags " - "%x state %d\n", - sc, abt_tag, fnic_priv(sc)->flags, fnic_priv(sc)->state); + 
shost_printk(KERN_ERR, fnic->host, + "rport_exch_reset IO not yet issued %p abt_tag 0x%x", + sc, abt_tag); + shost_printk(KERN_ERR, fnic->host, + "flags %x state %d\n", fnic_priv(sc)->flags, + fnic_priv(sc)->state); } old_ioreq_state = fnic_priv(sc)->state; fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING; fnic_priv(sc)->abts_status = FCPIO_INVALID_CODE; + if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) { atomic64_inc(&reset_stats->device_reset_terminates); abt_tag |= FNIC_TAG_DEV_RST; + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "dev reset sc 0x%p\n", sc); } - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, - "fnic_rport_exch_reset dev rst sc 0x%p\n", sc); - BUG_ON(io_req->abts_done); - - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "fnic_rport_exch_reset: dev rst sc 0x%p\n", sc); + WARN_ON_ONCE(io_req->abts_done); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "fnic_rport_reset_exch: Issuing abts\n"); spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); - /* Now queue the abort command to firmware */ + /* Queue the abort command to firmware */ int_to_scsilun(sc->device->lun, &fc_lun); if (fnic_queue_abort_io_req(fnic, abt_tag, @@ -1698,7 +1852,7 @@ static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data) * lun reset */ spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); - FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "hwq: %d abt_tag: 0x%x flags: 0x%x Queuing abort failed\n", hwq, abt_tag, fnic_priv(sc)->flags); if (fnic_priv(sc)->state == FNIC_IOREQ_ABTS_PENDING) @@ -1714,11 +1868,14 @@ static bool fnic_rport_abort_io_iter(struct scsi_cmnd *sc, void *data) atomic64_inc(&term_stats->terminates); iter_data->term_cnt++; } + return true; } -static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id) +void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id) { + unsigned int io_count = 0; + unsigned long flags; struct terminate_stats *term_stats = &fnic->fnic_stats.term_stats; struct fnic_rport_abort_io_iter_data iter_data = { .fnic = fnic, @@ -1726,53 +1883,115 @@ static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id) .term_cnt = 0, }; - FNIC_SCSI_DBG(KERN_DEBUG, - fnic->lport->host, fnic->fnic_num, - "fnic_rport_exch_reset called portid 0x%06x\n", - port_id); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "fnic rport exchange reset for tport: 0x%06x\n", + port_id); if (fnic->in_remove) return; - scsi_host_busy_iter(fnic->lport->host, fnic_rport_abort_io_iter, + io_count = fnic_count_ioreqs(fnic, port_id); + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "Starting terminates: rport:0x%x portid-io-count: %d active-io-count: %lld\n", + port_id, io_count, + atomic64_read(&fnic->fnic_stats.io_stats.active_ios)); + + spin_lock_irqsave(&fnic->fnic_lock, flags); + /* Bump in_flight counter to hold off fnic_fw_reset_handler. 
*/ + atomic_inc(&fnic->in_flight); + if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) { + atomic_dec(&fnic->in_flight); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + scsi_host_busy_iter(fnic->host, fnic_rport_abort_io_iter, &iter_data); + if (iter_data.term_cnt > atomic64_read(&term_stats->max_terminates)) atomic64_set(&term_stats->max_terminates, iter_data.term_cnt); + atomic_dec(&fnic->in_flight); + + while ((io_count = fnic_count_ioreqs(fnic, port_id))) + schedule_timeout(msecs_to_jiffies(1000)); + + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, + "rport: 0x%x remaining portid-io-count: %d ", + port_id, io_count); } void fnic_terminate_rport_io(struct fc_rport *rport) { - struct fc_rport_libfc_priv *rdata; - struct fc_lport *lport; - struct fnic *fnic; + struct fnic_tport_s *tport; + struct rport_dd_data_s *rdd_data; + struct fnic_iport_s *iport = NULL; + struct fnic *fnic = NULL; if (!rport) { - printk(KERN_ERR "fnic_terminate_rport_io: rport is NULL\n"); + pr_err("rport is NULL\n"); return; } - rdata = rport->dd_data; - if (!rdata) { - printk(KERN_ERR "fnic_terminate_rport_io: rdata is NULL\n"); - return; + rdd_data = rport->dd_data; + if (rdd_data) { + tport = rdd_data->tport; + if (!tport) { + pr_err( + "term rport io called after tport is deleted. Returning 0x%8x\n", + rport->port_id); + } else { + pr_err( + "term rport io called after tport is set 0x%8x\n", + rport->port_id); + pr_err( + "tport maybe rediscovered\n"); + + iport = (struct fnic_iport_s *) tport->iport; + fnic = iport->fnic; + fnic_rport_exch_reset(fnic, rport->port_id); + } } - lport = rdata->local_port; +} - if (!lport) { - printk(KERN_ERR "fnic_terminate_rport_io: lport is NULL\n"); - return; - } - fnic = lport_priv(lport); - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, - "wwpn 0x%llx, wwnn0x%llx, rport 0x%p, portid 0x%06x\n", - rport->port_name, rport->node_name, rport, - rport->port_id); +/* + * FCP-SCSI specific handling for module unload + * + */ +void fnic_scsi_unload(struct fnic *fnic) +{ + unsigned long flags; - if (fnic->in_remove) - return; + /* + * Mark state so that the workqueue thread stops forwarding + * received frames and link events to the local port. 
ISR and + * other threads that can queue work items will also stop + * creating work items on the fnic workqueue + */ + spin_lock_irqsave(&fnic->fnic_lock, flags); + fnic->iport.state = FNIC_IPORT_STATE_LINK_WAIT; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + if (fdls_get_state(&fnic->iport.fabric) != FDLS_STATE_INIT) + fnic_scsi_fcpio_reset(fnic); + + spin_lock_irqsave(&fnic->fnic_lock, flags); + fnic->in_remove = 1; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + fnic_flush_tport_event_list(fnic); + fnic_delete_fcp_tports(fnic); +} - fnic_rport_exch_reset(fnic, rport->port_id); +void fnic_scsi_unload_cleanup(struct fnic *fnic) +{ + int hwq = 0; + + fc_remove_host(fnic->host); + scsi_remove_host(fnic->host); + for (hwq = 0; hwq < fnic->wq_copy_count; hwq++) + kfree(fnic->sw_copy_wq[hwq].io_req_table); } /* @@ -1783,10 +2002,12 @@ void fnic_terminate_rport_io(struct fc_rport *rport) int fnic_abort_cmd(struct scsi_cmnd *sc) { struct request *const rq = scsi_cmd_to_rq(sc); - struct fc_lport *lp; + struct fnic_iport_s *iport; + struct fnic_tport_s *tport; struct fnic *fnic; struct fnic_io_req *io_req = NULL; struct fc_rport *rport; + struct rport_dd_data_s *rdd_data; unsigned long flags; unsigned long start_time = 0; int ret = SUCCESS; @@ -1806,11 +2027,11 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) fc_block_scsi_eh(sc); /* Get local-port, check ready and link up */ - lp = shost_priv(sc->device->host); - - fnic = lport_priv(lp); + fnic = *((struct fnic **) shost_priv(sc->device->host)); spin_lock_irqsave(&fnic->fnic_lock, flags); + iport = &fnic->iport; + fnic_stats = &fnic->fnic_stats; abts_stats = &fnic->fnic_stats.abts_stats; term_stats = &fnic->fnic_stats.term_stats; @@ -1821,7 +2042,44 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) fnic_priv(sc)->flags = FNIC_NO_FLAGS; - if (lp->state != LPORT_ST_READY || !(lp->link_up)) { + rdd_data = rport->dd_data; + tport = rdd_data->tport; + + if (!tport) { + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "Abort cmd called after tport delete! 
rport fcid: 0x%x", + rport->port_id); + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "lun: %llu hwq: 0x%x mqtag: 0x%x Op: 0x%x flags: 0x%x\n", + sc->device->lun, hwq, mqtag, + sc->cmnd[0], fnic_priv(sc)->flags); + ret = FAILED; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + goto fnic_abort_cmd_end; + } + + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Abort cmd called rport fcid: 0x%x lun: %llu hwq: 0x%x mqtag: 0x%x", + rport->port_id, sc->device->lun, hwq, mqtag); + + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Op: 0x%x flags: 0x%x\n", + sc->cmnd[0], + fnic_priv(sc)->flags); + + if (iport->state != FNIC_IPORT_STATE_READY) { + atomic64_inc(&fnic_stats->misc_stats.iport_not_ready); + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "iport NOT in READY state"); + ret = FAILED; + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + goto fnic_abort_cmd_end; + } + + if ((tport->state != FDLS_TGT_STATE_READY) && + (tport->state != FDLS_TGT_STATE_ADISC)) { + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, + "tport state: %d\n", tport->state); ret = FAILED; spin_unlock_irqrestore(&fnic->fnic_lock, flags); goto fnic_abort_cmd_end; @@ -1843,6 +2101,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); io_req = fnic_priv(sc)->io_req; if (!io_req) { + ret = FAILED; spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); goto fnic_abort_cmd_end; } @@ -1870,7 +2129,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) else atomic64_inc(&abts_stats->abort_issued_greater_than_60_sec); - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "CDB Opcode: 0x%02x Abort issued time: %lu msec\n", sc->cmnd[0], abt_issued_time); /* @@ -1893,7 +2152,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) if (fc_remote_port_chkready(rport) == 0) task_req = FCPIO_ITMF_ABT_TASK; else { - atomic64_inc(&fnic_stats->misc_stats.rport_not_ready); + atomic64_inc(&fnic_stats->misc_stats.tport_not_ready); task_req = FCPIO_ITMF_ABT_TASK_TERM; } @@ -1961,7 +2220,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) if (!(fnic_priv(sc)->flags & (FNIC_IO_ABORTED | FNIC_IO_DONE))) { spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); - FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "Issuing host reset due to out of order IO\n"); ret = FAILED; @@ -2009,7 +2268,7 @@ fnic_abort_cmd_end: (u64)sc->cmnd[4] << 8 | sc->cmnd[5]), fnic_flags_and_state(sc)); - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "Returning from abort cmd type %x %s\n", task_req, (ret == SUCCESS) ? 
"SUCCESS" : "FAILED"); @@ -2027,6 +2286,7 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic, unsigned long flags; uint16_t hwq = 0; uint32_t tag = 0; + struct fnic_tport_s *tport = io_req->tport; tag = io_req->tag; hwq = blk_mq_unique_tag_to_hwq(tag); @@ -2037,8 +2297,10 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic, FNIC_FLAGS_IO_BLOCKED))) { spin_unlock_irqrestore(&fnic->fnic_lock, flags); return FAILED; - } else + } else { atomic_inc(&fnic->in_flight); + atomic_inc(&tport->in_flight); + } spin_unlock_irqrestore(&fnic->fnic_lock, flags); spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags); @@ -2047,7 +2309,7 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic, free_wq_copy_descs(fnic, wq, hwq); if (!vnic_wq_copy_desc_avail(wq)) { - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "queue_dr_io_req failure - no descriptors\n"); atomic64_inc(&misc_stats->devrst_cpwq_alloc_failures); ret = -EAGAIN; @@ -2072,6 +2334,7 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic, lr_io_req_end: spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); atomic_dec(&fnic->in_flight); + atomic_dec(&tport->in_flight); return ret; } @@ -2114,7 +2377,7 @@ static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc, void *data) * Found IO that is still pending with firmware and * belongs to the LUN that we are resetting */ - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "Found IO in %s on lun\n", fnic_ioreq_state_to_str(fnic_priv(sc)->state)); @@ -2124,14 +2387,14 @@ static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc, void *data) } if ((fnic_priv(sc)->flags & FNIC_DEVICE_RESET) && (!(fnic_priv(sc)->flags & FNIC_DEV_RST_ISSUED))) { - FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, "dev rst not pending sc 0x%p\n", sc); spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); return true; } if (io_req->abts_done) - shost_printk(KERN_ERR, fnic->lport->host, + shost_printk(KERN_ERR, fnic->host, "%s: io_req->abts_done is set state is %s\n", __func__, fnic_ioreq_state_to_str(fnic_priv(sc)->state)); old_ioreq_state = fnic_priv(sc)->state; @@ -2147,7 +2410,7 @@ static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc, void *data) BUG_ON(io_req->abts_done); if (fnic_priv(sc)->flags & FNIC_DEVICE_RESET) { - FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, "dev rst sc 0x%p\n", sc); } @@ -2169,7 +2432,7 @@ static bool fnic_pending_aborts_iter(struct scsi_cmnd *sc, void *data) fnic_priv(sc)->state = old_ioreq_state; spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags); iter_data->ret = FAILED; - FNIC_SCSI_DBG(KERN_ERR, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num, "hwq: %d abt_tag: 0x%lx Abort could not be queued\n", hwq, abt_tag); return false; @@ -2248,7 +2511,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic, iter_data.lr_sc = lr_sc; - scsi_host_busy_iter(fnic->lport->host, + scsi_host_busy_iter(fnic->host, fnic_pending_aborts_iter, &iter_data); if (iter_data.ret == FAILED) { ret = iter_data.ret; @@ -2261,7 +2524,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic, ret = 1; clean_pending_aborts_end: - FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, "exit status: %d\n", ret); return 
@@ -2274,11 +2537,11 @@ clean_pending_aborts_end:
 int fnic_device_reset(struct scsi_cmnd *sc)
 {
 	struct request *rq = scsi_cmd_to_rq(sc);
-	struct fc_lport *lp;
 	struct fnic *fnic;
 	struct fnic_io_req *io_req = NULL;
 	struct fc_rport *rport;
 	int status;
+	int count = 0;
 	int ret = FAILED;
 	unsigned long flags;
 	unsigned long start_time = 0;
@@ -2289,31 +2552,63 @@ int fnic_device_reset(struct scsi_cmnd *sc)
 	DECLARE_COMPLETION_ONSTACK(tm_done);
 	bool new_sc = 0;
 	uint16_t hwq = 0;
+	struct fnic_iport_s *iport = NULL;
+	struct rport_dd_data_s *rdd_data;
+	struct fnic_tport_s *tport;
+	u32 old_soft_reset_count;
+	u32 old_link_down_cnt;
+	int exit_dr = 0;
 
 	/* Wait for rport to unblock */
 	fc_block_scsi_eh(sc);
 
 	/* Get local-port, check ready and link up */
-	lp = shost_priv(sc->device->host);
+	fnic = *((struct fnic **) shost_priv(sc->device->host));
+	iport = &fnic->iport;
 
-	fnic = lport_priv(lp);
 	fnic_stats = &fnic->fnic_stats;
-	reset_stats = &fnic->fnic_stats.reset_stats;
+	reset_stats = &fnic_stats->reset_stats;
 
 	atomic64_inc(&reset_stats->device_resets);
 
 	rport = starget_to_rport(scsi_target(sc->device));
-	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
-		"fcid: 0x%x lun: 0x%llx hwq: %d mqtag: 0x%x flags: 0x%x Device reset\n",
+
+	spin_lock_irqsave(&fnic->fnic_lock, flags);
+	FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+		"fcid: 0x%x lun: %llu hwq: %d mqtag: 0x%x flags: 0x%x Device reset\n",
		rport->port_id, sc->device->lun, hwq, mqtag,
		fnic_priv(sc)->flags);
 
-	if (lp->state != LPORT_ST_READY || !(lp->link_up))
+	rdd_data = rport->dd_data;
+	tport = rdd_data->tport;
+	if (!tport) {
+		FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+			"Dev rst called after tport delete! rport fcid: 0x%x lun: %llu\n",
+			rport->port_id, sc->device->lun);
+		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+		goto fnic_device_reset_end;
+	}
+
+	if (iport->state != FNIC_IPORT_STATE_READY) {
+		atomic64_inc(&fnic_stats->misc_stats.iport_not_ready);
+		FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+			"iport NOT in READY state");
+		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+		goto fnic_device_reset_end;
+	}
+
+	if ((tport->state != FDLS_TGT_STATE_READY) &&
+		(tport->state != FDLS_TGT_STATE_ADISC)) {
+		FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+			"tport state: %d\n", tport->state);
+		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 		goto fnic_device_reset_end;
+	}
+	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 
 	/* Check if remote port up */
 	if (fc_remote_port_chkready(rport)) {
-		atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
+		atomic64_inc(&fnic_stats->misc_stats.tport_not_ready);
 		goto fnic_device_reset_end;
 	}
 
@@ -2352,6 +2647,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
 	io_req->port_id = rport->port_id;
 	io_req->tag = mqtag;
 	fnic_priv(sc)->io_req = io_req;
+	io_req->tport = tport;
 	io_req->sc = sc;
 
 	if (fnic->sw_copy_wq[hwq].io_req_table[blk_mq_unique_tag_to_tag(mqtag)] != NULL)
@@ -2366,7 +2662,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
 	fnic_priv(sc)->lr_status = FCPIO_INVALID_CODE;
 	spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
 
-	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, "TAG %x\n", mqtag);
+	FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num, "TAG %x\n", mqtag);
 
 	/*
 	 * issue the device reset, if enqueue failed, clean up the ioreq
@@ -2383,6 +2679,11 @@ int fnic_device_reset(struct scsi_cmnd *sc)
 		fnic_priv(sc)->flags |= FNIC_DEV_RST_ISSUED;
 	spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
 
+	spin_lock_irqsave(&fnic->fnic_lock, flags);
+	old_link_down_cnt = iport->fnic->link_down_cnt;
+	old_soft_reset_count = fnic->soft_reset_count;
+	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
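For context (sketch only; field names are from the patch, the helper is not): the rport->dd_data area is owned by the LLD, and this series parks a struct there whose tport member points at the FDLS target port. A NULL tport is how both fnic_abort_cmd() and fnic_device_reset() above detect a target deleted after the command was issued.

static struct fnic_tport_s *sketch_tport_of(struct fc_rport *rport)
{
	struct rport_dd_data_s *rdd_data = rport->dd_data;

	/* NULL means FDLS already tore the target port down */
	return rdd_data ? rdd_data->tport : NULL;
}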
 	/*
 	 * Wait on the local completion for LUN reset. The io_req may be
 	 * freed while we wait since we hold no lock.
 	 */
@@ -2390,14 +2691,39 @@ int fnic_device_reset(struct scsi_cmnd *sc)
 	wait_for_completion_timeout(&tm_done,
				    msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
 
+	/*
+	 * Wake up can be due to the following reasons:
+	 * 1) The device reset completed from target.
+	 * 2) Device reset timed out.
+	 * 3) A link-down/host_reset may have happened in between.
+	 * 4) The device reset was aborted and io_req->dr_done was called.
+	 */
+
+	exit_dr = 0;
+	spin_lock_irqsave(&fnic->fnic_lock, flags);
+	if ((old_link_down_cnt != fnic->link_down_cnt) ||
+		(fnic->reset_in_progress) ||
+		(fnic->soft_reset_count != old_soft_reset_count) ||
+		(iport->state != FNIC_IPORT_STATE_READY))
+		exit_dr = 1;
+	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
 	spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
 	io_req = fnic_priv(sc)->io_req;
 	if (!io_req) {
 		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
-		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+		FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
			"io_req is null mqtag 0x%x sc 0x%p\n", mqtag, sc);
 		goto fnic_device_reset_end;
 	}
+
+	if (exit_dr) {
+		FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+			"Host reset called for fnic. Exit device reset\n");
+		io_req->dr_done = NULL;
+		goto fnic_device_reset_clean;
+	}
 	io_req->dr_done = NULL;
 
 	status = fnic_priv(sc)->lr_status;
@@ -2408,53 +2734,11 @@ int fnic_device_reset(struct scsi_cmnd *sc)
 	 */
 	if (status == FCPIO_INVALID_CODE) {
 		atomic64_inc(&reset_stats->device_reset_timeouts);
-		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+		FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
			"Device reset timed out\n");
 		fnic_priv(sc)->flags |= FNIC_DEV_RST_TIMED_OUT;
-		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
 		int_to_scsilun(sc->device->lun, &fc_lun);
-		/*
-		 * Issue abort and terminate on device reset request.
-		 * If q'ing of terminate fails, retry it after a delay.
-		 */
-		while (1) {
-			spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
-			if (fnic_priv(sc)->flags & FNIC_DEV_RST_TERM_ISSUED) {
-				spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
-				break;
-			}
-			spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
-			if (fnic_queue_abort_io_req(fnic,
-				mqtag | FNIC_TAG_DEV_RST,
-				FCPIO_ITMF_ABT_TASK_TERM,
-				fc_lun.scsi_lun, io_req, hwq)) {
-				wait_for_completion_timeout(&tm_done,
-					msecs_to_jiffies(FNIC_ABT_TERM_DELAY_TIMEOUT));
-			} else {
-				spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
-				fnic_priv(sc)->flags |= FNIC_DEV_RST_TERM_ISSUED;
-				fnic_priv(sc)->state = FNIC_IOREQ_ABTS_PENDING;
-				io_req->abts_done = &tm_done;
-				spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
-				FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
-					"Abort and terminate issued on Device reset mqtag 0x%x sc 0x%p\n",
-					mqtag, sc);
-				break;
-			}
-		}
-		while (1) {
-			spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
-			if (!(fnic_priv(sc)->flags & FNIC_DEV_RST_DONE)) {
-				spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
-				wait_for_completion_timeout(&tm_done,
-					msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
-				break;
-			} else {
-				io_req = fnic_priv(sc)->io_req;
-				io_req->abts_done = NULL;
-				goto fnic_device_reset_clean;
-			}
-		}
+		goto fnic_device_reset_clean;
 	} else {
 		spin_unlock_irqrestore(&fnic->wq_copy_lock[hwq], flags);
 	}
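The snapshot-and-compare idiom above in miniature (illustrative helper, not in the patch): volatile event counters are sampled under fnic_lock before the unlocked sleep, and any change observed afterwards means a link event or host reset overtook the LUN reset, so the handler bails out through fnic_device_reset_clean rather than trusting stale completion state.

static bool sketch_world_changed(struct fnic *fnic, u32 old_link_down_cnt,
				 u32 old_soft_reset_count)
{
	unsigned long flags;
	bool changed;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	changed = old_link_down_cnt != fnic->link_down_cnt ||
		  old_soft_reset_count != fnic->soft_reset_count ||
		  fnic->reset_in_progress ||	/* IN_PROGRESS per the patch */
		  fnic->iport.state != FNIC_IPORT_STATE_READY;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	return changed;
}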
@@ -2463,7 +2747,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
 	if (status != FCPIO_SUCCESS) {
 		spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
 		FNIC_SCSI_DBG(KERN_DEBUG,
-			fnic->lport->host, fnic->fnic_num,
+			fnic->host, fnic->fnic_num,
			"Device reset completed - failed\n");
 		io_req = fnic_priv(sc)->io_req;
 		goto fnic_device_reset_clean;
@@ -2479,9 +2763,8 @@ int fnic_device_reset(struct scsi_cmnd *sc)
 	if (fnic_clean_pending_aborts(fnic, sc, new_sc)) {
 		spin_lock_irqsave(&fnic->wq_copy_lock[hwq], flags);
 		io_req = fnic_priv(sc)->io_req;
-		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
-			"Device reset failed"
-			" since could not abort all IOs\n");
+		FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
+			"Device reset failed: Cannot abort all IOs\n");
 		goto fnic_device_reset_clean;
 	}
 
@@ -2507,6 +2790,15 @@ fnic_device_reset_clean:
 		mempool_free(io_req, fnic->io_req_pool);
 	}
 
+	/*
+	 * If link-event is seen while LUN reset is issued we need
+	 * to complete the LUN reset here
+	 */
+	if (!new_sc) {
+		sc->result = DID_RESET << 16;
+		scsi_done(sc);
+	}
+
 fnic_device_reset_end:
 	FNIC_TRACE(fnic_device_reset, sc->device->host->host_no, rq->tag, sc,
		  jiffies_to_msecs(jiffies - start_time),
@@ -2520,7 +2812,18 @@ fnic_device_reset_end:
 		mutex_unlock(&fnic->sgreset_mutex);
 	}
 
-	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
+	while ((ret == SUCCESS) && fnic_count_lun_ioreqs(fnic, sc->device)) {
+		if (count >= 2) {
+			ret = FAILED;
+			break;
+		}
+		FNIC_SCSI_DBG(KERN_ERR, fnic->host, fnic->fnic_num,
+			"Cannot clean up all IOs for the LUN\n");
+		schedule_timeout(msecs_to_jiffies(1000));
+		count++;
+	}
+
+	FNIC_SCSI_DBG(KERN_DEBUG, fnic->host, fnic->fnic_num,
		"Returning from device reset %s\n",
		(ret == SUCCESS) ? "SUCCESS" : "FAILED");
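The closing while loop above polls fnic_count_lun_ioreqs() up to three times before downgrading the reset to FAILED. The same bounded drain, sketched with msleep() as the sleep primitive (the patch itself calls schedule_timeout()):

#include <linux/delay.h>

static int sketch_drain_lun(struct fnic *fnic, struct scsi_device *sdev)
{
	int tries;

	for (tries = 0; tries < 3; tries++) {
		if (!fnic_count_lun_ioreqs(fnic, sdev))
			return SUCCESS;		/* LUN fully quiesced */
		msleep(1000);			/* give completions a second */
	}
	return FAILED;				/* IOs still outstanding */
}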
"SUCCESS" : "FAILED"); @@ -2531,67 +2834,78 @@ fnic_device_reset_end: return ret; } -/* Clean up all IOs, clean up libFC local port */ -int fnic_reset(struct Scsi_Host *shost) +static void fnic_post_flogo_linkflap(struct fnic *fnic) +{ + unsigned long flags; + + fnic_fdls_link_status_change(fnic, 0); + spin_lock_irqsave(&fnic->fnic_lock, flags); + + if (fnic->link_status) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + fnic_fdls_link_status_change(fnic, 1); + return; + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); +} + +/* Logout from all the targets and simulate link flap */ +void fnic_reset(struct Scsi_Host *shost) { - struct fc_lport *lp; struct fnic *fnic; - int ret = 0; struct reset_stats *reset_stats; - lp = shost_priv(shost); - fnic = lport_priv(lp); + fnic = *((struct fnic **) shost_priv(shost)); reset_stats = &fnic->fnic_stats.reset_stats; - FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, - "Issuing fnic reset\n"); + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Issuing fnic reset\n"); atomic64_inc(&reset_stats->fnic_resets); + fnic_post_flogo_linkflap(fnic); - /* - * Reset local port, this will clean up libFC exchanges, - * reset remote port sessions, and if link is up, begin flogi - */ - ret = fc_lport_reset(lp); + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "Returning from fnic reset"); - FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, - "Returning from fnic reset with: %s\n", - (ret == 0) ? "SUCCESS" : "FAILED"); + atomic64_inc(&reset_stats->fnic_reset_completions); +} - if (ret == 0) - atomic64_inc(&reset_stats->fnic_reset_completions); - else - atomic64_inc(&reset_stats->fnic_reset_failures); +int fnic_issue_fc_host_lip(struct Scsi_Host *shost) +{ + int ret = 0; + struct fnic *fnic = *((struct fnic **) shost_priv(shost)); + + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "FC host lip issued"); + ret = fnic_host_reset(shost); return ret; } -/* - * SCSI Error handling calls driver's eh_host_reset if all prior - * error handling levels return FAILED. If host reset completes - * successfully, and if link is up, then Fabric login begins. - * - * Host Reset is the highest level of error recovery. If this fails, then - * host is offlined by SCSI. - * - */ -int fnic_host_reset(struct scsi_cmnd *sc) +int fnic_host_reset(struct Scsi_Host *shost) { - int ret; + int ret = SUCCESS; unsigned long wait_host_tmo; - struct Scsi_Host *shost = sc->device->host; - struct fc_lport *lp = shost_priv(shost); - struct fnic *fnic = lport_priv(lp); + struct fnic *fnic = *((struct fnic **) shost_priv(shost)); unsigned long flags; + struct fnic_iport_s *iport = &fnic->iport; spin_lock_irqsave(&fnic->fnic_lock, flags); - if (!fnic->internal_reset_inprogress) { - fnic->internal_reset_inprogress = true; + if (fnic->reset_in_progress == NOT_IN_PROGRESS) { + fnic->reset_in_progress = IN_PROGRESS; } else { spin_unlock_irqrestore(&fnic->fnic_lock, flags); - FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num, - "host reset in progress skipping another host reset\n"); - return SUCCESS; + wait_for_completion_timeout(&fnic->reset_completion_wait, + msecs_to_jiffies(10000)); + + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (fnic->reset_in_progress == IN_PROGRESS) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + FNIC_SCSI_DBG(KERN_WARNING, fnic->host, fnic->fnic_num, + "Firmware reset in progress. 
@@ -2600,140 +2914,34 @@ int fnic_host_reset(struct scsi_cmnd *sc)
 	 * scsi-ml tries to send a TUR to every device if host reset is
 	 * successful, so before returning to scsi, fabric should be up
 	 */
-	ret = (fnic_reset(shost) == 0) ? SUCCESS : FAILED;
-	if (ret == SUCCESS) {
+	fnic_reset(shost);
+
+	spin_lock_irqsave(&fnic->fnic_lock, flags);
+	fnic->reset_in_progress = NOT_IN_PROGRESS;
+	complete(&fnic->reset_completion_wait);
+	fnic->soft_reset_count++;
+
+	/* wait till the link is up */
+	if (fnic->link_status) {
 		wait_host_tmo = jiffies + FNIC_HOST_RESET_SETTLE_TIME * HZ;
 		ret = FAILED;
 		while (time_before(jiffies, wait_host_tmo)) {
-			if ((lp->state == LPORT_ST_READY) &&
-			    (lp->link_up)) {
+			if (iport->state != FNIC_IPORT_STATE_READY
+				&& fnic->link_status) {
+				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+				ssleep(1);
+				spin_lock_irqsave(&fnic->fnic_lock, flags);
+			} else {
 				ret = SUCCESS;
 				break;
 			}
-			ssleep(1);
 		}
 	}
-
-	spin_lock_irqsave(&fnic->fnic_lock, flags);
-	fnic->internal_reset_inprogress = false;
-	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-	return ret;
-}
-
-/*
- * This fxn is called from libFC when host is removed
- */
-void fnic_scsi_abort_io(struct fc_lport *lp)
-{
-	int err = 0;
-	unsigned long flags;
-	enum fnic_state old_state;
-	struct fnic *fnic = lport_priv(lp);
-	DECLARE_COMPLETION_ONSTACK(remove_wait);
-
-	/* Issue firmware reset for fnic, wait for reset to complete */
-retry_fw_reset:
-	spin_lock_irqsave(&fnic->fnic_lock, flags);
-	if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) &&
-		     fnic->link_events) {
-		/* fw reset is in progress, poll for its completion */
-		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-		schedule_timeout(msecs_to_jiffies(100));
-		goto retry_fw_reset;
-	}
-
-	fnic->remove_wait = &remove_wait;
-	old_state = fnic->state;
-	fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
-	fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
 	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 
-	err = fnic_fw_reset_handler(fnic);
-	if (err) {
-		spin_lock_irqsave(&fnic->fnic_lock, flags);
-		if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
-			fnic->state = old_state;
-		fnic->remove_wait = NULL;
-		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-		return;
-	}
-
-	/* Wait for firmware reset to complete */
-	wait_for_completion_timeout(&remove_wait,
-				    msecs_to_jiffies(FNIC_RMDEVICE_TIMEOUT));
-
-	spin_lock_irqsave(&fnic->fnic_lock, flags);
-	fnic->remove_wait = NULL;
-	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, fnic->fnic_num,
-		"fnic_scsi_abort_io %s\n",
-		(fnic->state == FNIC_IN_ETH_MODE) ?
-		"SUCCESS" : "FAILED");
-	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-
-}
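The settle loop that replaces the old libfc lport test above, reduced to its shape (illustrative; the patch additionally tests fnic->link_status): the readiness condition is only stable under fnic_lock, so the lock is dropped across each one-second sleep and retaken before the next check, until FNIC_HOST_RESET_SETTLE_TIME expires.

static int sketch_wait_iport_ready(struct fnic *fnic, unsigned long settle_secs)
{
	unsigned long deadline = jiffies + settle_secs * HZ;
	unsigned long flags;
	int ret = FAILED;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	while (time_before(jiffies, deadline)) {
		if (fnic->iport.state == FNIC_IPORT_STATE_READY) {
			ret = SUCCESS;
			break;
		}
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		ssleep(1);		/* sleep without holding the lock */
		spin_lock_irqsave(&fnic->fnic_lock, flags);
	}
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	return ret;
}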
- "SUCCESS" : "FAILED"); - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - -} - -/* - * This fxn called from libFC to clean up driver IO state on link down - */ -void fnic_scsi_cleanup(struct fc_lport *lp) -{ - unsigned long flags; - enum fnic_state old_state; - struct fnic *fnic = lport_priv(lp); - - /* issue fw reset */ -retry_fw_reset: - spin_lock_irqsave(&fnic->fnic_lock, flags); - if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) { - /* fw reset is in progress, poll for its completion */ - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - schedule_timeout(msecs_to_jiffies(100)); - goto retry_fw_reset; - } - old_state = fnic->state; - fnic->state = FNIC_IN_FC_TRANS_ETH_MODE; - fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr); - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - - if (fnic_fw_reset_handler(fnic)) { - spin_lock_irqsave(&fnic->fnic_lock, flags); - if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) - fnic->state = old_state; - spin_unlock_irqrestore(&fnic->fnic_lock, flags); - } - -} - -void fnic_empty_scsi_cleanup(struct fc_lport *lp) -{ -} - -void fnic_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did) -{ - struct fnic *fnic = lport_priv(lp); - - /* Non-zero sid, nothing to do */ - if (sid) - goto call_fc_exch_mgr_reset; - - if (did) { - fnic_rport_exch_reset(fnic, did); - goto call_fc_exch_mgr_reset; - } - - /* - * sid = 0, did = 0 - * link down or device being removed - */ - if (!fnic->in_remove) - fnic_scsi_cleanup(lp); - else - fnic_scsi_abort_io(lp); - - /* call libFC exch mgr reset to reset its exchanges */ -call_fc_exch_mgr_reset: - fc_exch_mgr_reset(lp, sid, did); - + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, + "host reset return status: %d\n", ret); + return ret; } static bool fnic_abts_pending_iter(struct scsi_cmnd *sc, void *data) @@ -2771,7 +2979,7 @@ static bool fnic_abts_pending_iter(struct scsi_cmnd *sc, void *data) * Found IO that is still pending with firmware and * belongs to the LUN that we are resetting */ - FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, fnic->fnic_num, + FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num, "hwq: %d tag: 0x%x Found IO in state: %s on lun\n", hwq, tag, fnic_ioreq_state_to_str(fnic_priv(sc)->state)); @@ -2804,8 +3012,81 @@ int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc) } /* walk again to check, if IOs are still pending in fw */ - scsi_host_busy_iter(fnic->lport->host, + scsi_host_busy_iter(fnic->host, fnic_abts_pending_iter, &iter_data); return iter_data.ret; } + +/* + * SCSI Error handling calls driver's eh_host_reset if all prior + * error handling levels return FAILED. If host reset completes + * successfully, and if link is up, then Fabric login begins. + * + * Host Reset is the highest level of error recovery. If this fails, then + * host is offlined by SCSI. 
+void fnic_scsi_fcpio_reset(struct fnic *fnic)
+{
+	unsigned long flags;
+	enum fnic_state old_state;
+	struct fnic_iport_s *iport = &fnic->iport;
+	DECLARE_COMPLETION_ONSTACK(fw_reset_done);
+	int time_remain;
+
+	/* issue fw reset */
+	spin_lock_irqsave(&fnic->fnic_lock, flags);
+	if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
+		/* fw reset is in progress, poll for its completion */
+		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+		FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+			"fnic is in unexpected state: %d for fw_reset\n",
+			fnic->state);
+		return;
+	}
+
+	old_state = fnic->state;
+	fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
+
+	fnic_update_mac_locked(fnic, iport->hwmac);
+	fnic->fw_reset_done = &fw_reset_done;
+	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+
+	FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+		"Issuing fw reset\n");
+	if (fnic_fw_reset_handler(fnic)) {
+		spin_lock_irqsave(&fnic->fnic_lock, flags);
+		if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
+			fnic->state = old_state;
+		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+	} else {
+		FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+			"Waiting for fw completion\n");
+		time_remain = wait_for_completion_timeout(&fw_reset_done,
+					msecs_to_jiffies(FNIC_FW_RESET_TIMEOUT));
+		FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+			"Woken up after fw completion timeout\n");
+		if (time_remain == 0) {
+			FNIC_SCSI_DBG(KERN_INFO, fnic->host, fnic->fnic_num,
+				"FW reset completion timed out after %d ms\n",
+				FNIC_FW_RESET_TIMEOUT);
+		}
+		atomic64_inc(&fnic->fnic_stats.reset_stats.fw_reset_timeouts);
+	}
+	fnic->fw_reset_done = NULL;
+}
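fnic_scsi_fcpio_reset() above reduced to its completion handshake (illustrative; the ctx struct and fire callback are hypothetical): an on-stack completion is published where the completion path can find it, the reset is fired, and wait_for_completion_timeout() returning 0 — rather than the remaining jiffies — is the timeout case.

#include <linux/completion.h>
#include <linux/errno.h>

struct sketch_ctx {
	struct completion *reset_done;	/* completed from the ISR path */
};

static int sketch_fw_reset(struct sketch_ctx *ctx,
			   int (*fire)(struct sketch_ctx *),
			   unsigned int timeout_ms)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int ret = 0;

	ctx->reset_done = &done;	/* publish before firing */
	if (fire(ctx))
		ret = -EIO;		/* could not even issue the reset */
	else if (!wait_for_completion_timeout(&done,
					      msecs_to_jiffies(timeout_ms)))
		ret = -ETIMEDOUT;	/* firmware never answered */
	ctx->reset_done = NULL;		/* nothing may complete it now */
	return ret;
}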
diff --git a/drivers/scsi/fnic/fnic_stats.h b/drivers/scsi/fnic/fnic_stats.h
index 9d7f98c452dd..8ddd20401a59 100644
--- a/drivers/scsi/fnic/fnic_stats.h
+++ b/drivers/scsi/fnic/fnic_stats.h
@@ -3,6 +3,7 @@
 #ifndef _FNIC_STATS_H_
 #define _FNIC_STATS_H_
 #define FNIC_MQ_MAX_QUEUES 64
+#include <scsi/scsi_transport_fc.h>
 
 struct stats_timestamps {
 	struct timespec64 last_reset_time;
@@ -63,6 +64,7 @@ struct reset_stats {
 	atomic64_t fw_resets;
 	atomic64_t fw_reset_completions;
 	atomic64_t fw_reset_failures;
+	atomic64_t fw_reset_timeouts;
 	atomic64_t fnic_resets;
 	atomic64_t fnic_reset_completions;
 	atomic64_t fnic_reset_failures;
@@ -102,10 +104,51 @@ struct misc_stats {
 	atomic64_t no_icmnd_itmf_cmpls;
 	atomic64_t check_condition;
 	atomic64_t queue_fulls;
-	atomic64_t rport_not_ready;
+	atomic64_t tport_not_ready;
+	atomic64_t iport_not_ready;
 	atomic64_t frame_errors;
 	atomic64_t current_port_speed;
 	atomic64_t intx_dummy;
+	atomic64_t port_speed_in_mbps;
+};
+
+struct fnic_iport_stats {
+	atomic64_t num_linkdn;
+	atomic64_t num_linkup;
+	atomic64_t link_failure_count;
+	atomic64_t num_rscns;
+	atomic64_t rscn_redisc;
+	atomic64_t rscn_not_redisc;
+	atomic64_t frame_err;
+	atomic64_t num_rnid;
+	atomic64_t fabric_flogi_sent;
+	atomic64_t fabric_flogi_ls_accepts;
+	atomic64_t fabric_flogi_ls_rejects;
+	atomic64_t fabric_flogi_misc_rejects;
+	atomic64_t fabric_plogi_sent;
+	atomic64_t fabric_plogi_ls_accepts;
+	atomic64_t fabric_plogi_ls_rejects;
+	atomic64_t fabric_plogi_misc_rejects;
+	atomic64_t fabric_scr_sent;
+	atomic64_t fabric_scr_ls_accepts;
+	atomic64_t fabric_scr_ls_rejects;
+	atomic64_t fabric_scr_misc_rejects;
+	atomic64_t fabric_logo_sent;
+	atomic64_t tport_alive;
+	atomic64_t tport_plogi_sent;
+	atomic64_t tport_plogi_ls_accepts;
+	atomic64_t tport_plogi_ls_rejects;
+	atomic64_t tport_plogi_misc_rejects;
+	atomic64_t tport_prli_sent;
+	atomic64_t tport_prli_ls_accepts;
+	atomic64_t tport_prli_ls_rejects;
+	atomic64_t tport_prli_misc_rejects;
+	atomic64_t tport_adisc_sent;
+	atomic64_t tport_adisc_ls_accepts;
+	atomic64_t tport_adisc_ls_rejects;
+	atomic64_t tport_logo_sent;
+	atomic64_t unsupported_frames_ls_rejects;
+	atomic64_t unsupported_frames_dropped;
 };
 
 struct fnic_stats {
@@ -116,6 +159,7 @@ struct fnic_stats {
 	struct reset_stats reset_stats;
 	struct fw_stats fw_stats;
 	struct vlan_stats vlan_stats;
+	struct fc_host_statistics host_stats;
 	struct misc_stats misc_stats;
 };
 
@@ -127,6 +171,5 @@ struct stats_debug_info {
 };
 int fnic_get_stats_data(struct stats_debug_info *, struct fnic_stats *);
-void fnic_stats_debugfs_init(struct fnic *);
-void fnic_stats_debugfs_remove(struct fnic *);
+const char *fnic_role_to_str(unsigned int role);
 #endif /* _FNIC_STATS_H_ */
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
index e5e0c0492f23..cdc6b12b1ec2 100644
--- a/drivers/scsi/fnic/fnic_trace.c
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -8,6 +8,7 @@
 #include <linux/kallsyms.h>
 #include <linux/time.h>
 #include <linux/vmalloc.h>
+#include <scsi/scsi_transport_fc.h>
 #include "fnic_io.h"
 #include "fnic.h"
 
@@ -29,6 +30,17 @@ int fnic_fc_tracing_enabled = 1;
 int fnic_fc_trace_cleared = 1;
 static DEFINE_SPINLOCK(fnic_fc_trace_lock);
 
+static const char * const fnic_role_str[] = {
+	[FNIC_ROLE_FCP_INITIATOR] = "FCP_Initiator",
+};
+
+const char *fnic_role_to_str(unsigned int role)
+{
+	if (role >= ARRAY_SIZE(fnic_role_str) || !fnic_role_str[role])
+		return "Unknown";
+
+	return fnic_role_str[role];
+}
 
 /*
  * fnic_trace_get_buf - Give buffer pointer to user to fill up trace information
@@ -423,7 +435,8 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
		  "Number of Check Conditions encountered: %lld\n"
		  "Number of QUEUE Fulls: %lld\n"
		  "Number of rport not ready: %lld\n"
-		  "Number of receive frame errors: %lld\n",
+		  "Number of receive frame errors: %lld\n"
+		  "Port speed (in Mbps): %lld\n",
		  (u64)stats->misc_stats.last_isr_time,
		  (s64)val1.tv_sec, val1.tv_nsec,
		  (u64)stats->misc_stats.last_ack_time,
@@ -446,18 +459,68 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
		  (u64)atomic64_read(&stats->misc_stats.no_icmnd_itmf_cmpls),
		  (u64)atomic64_read(&stats->misc_stats.check_condition),
		  (u64)atomic64_read(&stats->misc_stats.queue_fulls),
-		  (u64)atomic64_read(&stats->misc_stats.rport_not_ready),
-		  (u64)atomic64_read(&stats->misc_stats.frame_errors));
-
-	len += scnprintf(debug->debug_buffer + len, buf_size - len,
-		  "Firmware reported port speed: %llu\n",
-		  (u64)atomic64_read(
-			&stats->misc_stats.current_port_speed));
+		  (u64)atomic64_read(&stats->misc_stats.tport_not_ready),
+		  (u64)atomic64_read(&stats->misc_stats.frame_errors),
+		  (u64)atomic64_read(&stats->misc_stats.port_speed_in_mbps));
 
 	return len;
 }
 
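The fnic_role_to_str() pattern above generalizes to any enum and is worth naming (sketch with hypothetical names): designated initializers keep the table aligned with the enum, and the two-part guard catches both out-of-range values and holes left unnamed in the table.

#include <linux/kernel.h>

enum sketch_role { SKETCH_INITIATOR, SKETCH_TARGET, SKETCH_NR_ROLES };

static const char * const sketch_role_str[] = {
	[SKETCH_INITIATOR] = "Initiator",
	/* SKETCH_TARGET left unnamed: falls back to "Unknown" below */
};

static const char *sketch_role_to_str(unsigned int role)
{
	if (role >= ARRAY_SIZE(sketch_role_str) || !sketch_role_str[role])
		return "Unknown";

	return sketch_role_str[role];
}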
"------------------------------------------\n"); + len += snprintf(info->debug_buffer + len, buf_size - len, + "fnic Name:%s number:%d Role:%s State:%s\n", + fnic->name, fnic->fnic_num, + fnic_role_to_str(fnic->role), + fnic_state_to_str(fnic->state)); + len += + snprintf(info->debug_buffer + len, buf_size - len, + "iport State:%d Flags:0x%x vlan_id:%d fcid:0x%x\n", + iport->state, iport->flags, iport->vlan_id, iport->fcid); + len += + snprintf(info->debug_buffer + len, buf_size - len, + "usefip:%d fip_state:%d fip_flogi_retry:%d\n", + iport->usefip, iport->fip.state, iport->fip.flogi_retry); + len += + snprintf(info->debug_buffer + len, buf_size - len, + "fpma %02x:%02x:%02x:%02x:%02x:%02x", + iport->fpma[5], iport->fpma[4], iport->fpma[3], + iport->fpma[2], iport->fpma[1], iport->fpma[0]); + len += + snprintf(info->debug_buffer + len, buf_size - len, + "fcfmac %02x:%02x:%02x:%02x:%02x:%02x\n", + iport->fcfmac[5], iport->fcfmac[4], iport->fcfmac[3], + iport->fcfmac[2], iport->fcfmac[1], iport->fcfmac[0]); + len += + snprintf(info->debug_buffer + len, buf_size - len, + "fabric state:%d flags:0x%x retry_counter:%d e_d_tov:%d r_a_tov:%d\n", + iport->fabric.state, iport->fabric.flags, + iport->fabric.retry_counter, iport->e_d_tov, + iport->r_a_tov); + + spin_lock_irqsave(&fnic->fnic_lock, flags); + list_for_each_entry_safe(tport, next, &iport->tport_list, links) { + len += snprintf(info->debug_buffer + len, buf_size - len, + "tport fcid:0x%x state:%d flags:0x%x inflight:%d retry_counter:%d\n", + tport->fcid, tport->state, tport->flags, + atomic_read(&tport->in_flight), + tport->retry_counter); + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return len; +} + /* * fnic_trace_buf_init - Initialize fnic trace buffer logging facility * @@ -678,7 +741,7 @@ int fnic_fc_trace_set_data(u32 host_no, u8 frame_type, */ if (frame_type == FNIC_FC_RECV) { eth_fcoe_hdr_len = sizeof(struct ethhdr) + - sizeof(struct fcoe_hdr); + sizeof(struct fcoe_hdr); memset((char *)fc_trace, 0xff, eth_fcoe_hdr_len); /* Copy the rest of data frame */ memcpy((char *)(fc_trace + eth_fcoe_hdr_len), (void *)frame, |