Diffstat (limited to 'drivers/nvme/host')
 drivers/nvme/host/apple.c |  8
 drivers/nvme/host/core.c  | 15
 drivers/nvme/host/pci.c   |  8
 drivers/nvme/host/rdma.c  | 19
 drivers/nvme/host/tcp.c   |  3
 drivers/nvme/host/trace.h | 15
 6 files changed, 28 insertions(+), 40 deletions(-)
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
index b317ce6c4ec3..596bb11eeba5 100644
--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -209,16 +209,16 @@ static inline struct apple_nvme *queue_to_apple_nvme(struct apple_nvme_queue *q)
{
if (q->is_adminq)
return container_of(q, struct apple_nvme, adminq);
- else
- return container_of(q, struct apple_nvme, ioq);
+
+ return container_of(q, struct apple_nvme, ioq);
}
static unsigned int apple_nvme_queue_depth(struct apple_nvme_queue *q)
{
if (q->is_adminq)
return APPLE_NVME_AQ_DEPTH;
- else
- return APPLE_ANS_MAX_QUEUE_DEPTH;
+
+ return APPLE_ANS_MAX_QUEUE_DEPTH;
}
static void apple_nvme_rtkit_crashed(void *cookie)
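The apple.c hunks apply the usual kernel-style cleanup of dropping an else branch whose if counterpart already returns. A minimal sketch of the resulting shape (function name and depths below are illustrative, not the driver's):

/* Illustrative only: select a queue depth without a redundant else. */
static unsigned int example_queue_depth(bool is_adminq)
{
	if (is_adminq)
		return 32;		/* admin queue depth */

	return 1024;			/* I/O queue depth */
}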
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 3ffdc80ebb6c..ccb6eb1282f8 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -450,8 +450,8 @@ bool nvme_cancel_request(struct request *req, void *data)
dev_dbg_ratelimited(((struct nvme_ctrl *) data)->device,
"Cancelling I/O %d", req->tag);
- /* don't abort one completed request */
- if (blk_mq_request_completed(req))
+ /* don't abort one completed or idle request */
+ if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT)
return true;
nvme_req(req)->status = NVME_SC_HOST_ABORTED_CMD;
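The cancel-path change widens the filter: blk_mq_request_completed() only skipped completed requests, while comparing blk_mq_rq_state() against MQ_RQ_IN_FLIGHT also skips idle requests that were never started. A sketch of the callback shape, assuming the blk-mq state helpers are visible to the caller:

/* Sketch: tagset-iteration callback that only aborts in-flight requests. */
static bool example_cancel_request(struct request *req, void *data)
{
	/* Idle requests never started; completed ones already finished. */
	if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT)
		return true;		/* keep iterating, nothing to abort */

	blk_mq_complete_request(req);
	return true;
}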
@@ -1674,6 +1674,9 @@ static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
struct request_queue *queue = disk->queue;
u32 size = queue_logical_block_size(queue);
+ if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns, UINT_MAX))
+ ctrl->max_discard_sectors = nvme_lba_to_sect(ns, ctrl->dmrsl);
+
if (ctrl->max_discard_sectors == 0) {
blk_queue_max_discard_sectors(queue, 0);
return;
@@ -1688,9 +1691,6 @@ static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
if (queue->limits.max_discard_sectors)
return;
- if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns, UINT_MAX))
- ctrl->max_discard_sectors = nvme_lba_to_sect(ns, ctrl->dmrsl);
-
blk_queue_max_discard_sectors(queue, ctrl->max_discard_sectors);
blk_queue_max_discard_segments(queue, ctrl->max_discard_segments);
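Hoisting the DMRSL (Dataset Management Range Size Limit) clamp to the top of nvme_config_discard() means it runs before the early returns below it, so a controller that only advertises a limit via DMRSL gets max_discard_sectors populated before the zero check can disable discard. The helpers convert between 512-byte block-layer sectors and namespace logical blocks; a sketch of that conversion, assuming a per-namespace lba_shift:

/* Sketch: 512-byte sector <-> namespace LBA conversion via lba_shift. */
static inline u64 example_sect_to_lba(u32 lba_shift, u64 sector)
{
	return sector >> (lba_shift - SECTOR_SHIFT);
}

static inline u64 example_lba_to_sect(u32 lba_shift, u64 lba)
{
	return lba << (lba_shift - SECTOR_SHIFT);
}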
@@ -4819,8 +4819,6 @@ static bool nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
u32 aer_notice_type = nvme_aer_subtype(result);
bool requeue = true;
- trace_nvme_async_event(ctrl, aer_notice_type);
-
switch (aer_notice_type) {
case NVME_AER_NOTICE_NS_CHANGED:
set_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events);
@@ -4856,7 +4854,6 @@ static bool nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
static void nvme_handle_aer_persistent_error(struct nvme_ctrl *ctrl)
{
- trace_nvme_async_event(ctrl, NVME_AER_ERROR);
dev_warn(ctrl->device, "resetting controller due to AER\n");
nvme_reset_ctrl(ctrl);
}
@@ -4872,6 +4869,7 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
if (le16_to_cpu(status) >> 1 != NVME_SC_SUCCESS)
return;
+ trace_nvme_async_event(ctrl, result);
switch (aer_type) {
case NVME_AER_NOTICE:
requeue = nvme_handle_aen_notice(ctrl, result);
@@ -4889,7 +4887,6 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
case NVME_AER_SMART:
case NVME_AER_CSS:
case NVME_AER_VS:
- trace_nvme_async_event(ctrl, aer_type);
ctrl->aen_result = result;
break;
default:
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 282d808400c5..7f25c0fe3a0b 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -5,7 +5,6 @@
*/
#include <linux/acpi.h>
-#include <linux/aer.h>
#include <linux/async.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
@@ -2535,7 +2534,6 @@ static int nvme_pci_enable(struct nvme_dev *dev)
nvme_map_cmb(dev);
- pci_enable_pcie_error_reporting(pdev);
pci_save_state(pdev);
result = nvme_pci_configure_admin_queue(dev);
@@ -2600,10 +2598,8 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
nvme_suspend_io_queues(dev);
nvme_suspend_queue(dev, 0);
pci_free_irq_vectors(pdev);
- if (pci_is_enabled(pdev)) {
- pci_disable_pcie_error_reporting(pdev);
+ if (pci_is_enabled(pdev))
pci_disable_device(pdev);
- }
nvme_reap_pending_cqes(dev);
nvme_cancel_tagset(&dev->ctrl);
@@ -3443,6 +3439,8 @@ static const struct pci_device_id nvme_id_table[] = {
{ PCI_DEVICE(0x1d97, 0x2269), /* Lexar NM760 */
.driver_data = NVME_QUIRK_BOGUS_NID |
NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ { PCI_DEVICE(0x10ec, 0x5763), /* TEAMGROUP T-FORCE CARDEA ZERO Z330 SSD */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061),
.driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, },
{ PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065),
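The pci.c hunks drop the driver's pci_enable_pcie_error_reporting()/pci_disable_pcie_error_reporting() calls, since the PCI core now enables AER reporting on its own (hence the removed <linux/aer.h> include), and add a BOGUS_NID quirk for a Realtek-based Teamgroup SSD. A sketch of how such an entry is shaped, assuming the NVME_QUIRK_* flags from the driver's headers:

/* Sketch: ID-table entry that attaches quirk flags via driver_data. */
static const struct pci_device_id example_id_table[] = {
	{ PCI_DEVICE(0x10ec, 0x5763),	/* vendor 0x10ec, device 0x5763 */
	  .driver_data = NVME_QUIRK_BOGUS_NID, },
	{ 0, }				/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, example_id_table);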
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index bbad26b82b56..0eb79696fb73 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -12,7 +12,6 @@
#include <linux/string.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>
-#include <linux/blk-mq-rdma.h>
#include <linux/blk-integrity.h>
#include <linux/types.h>
#include <linux/list.h>
@@ -464,7 +463,6 @@ static int nvme_rdma_create_cq(struct ib_device *ibdev,
struct nvme_rdma_queue *queue)
{
int ret, comp_vector, idx = nvme_rdma_queue_idx(queue);
- enum ib_poll_context poll_ctx;
/*
* Spread I/O queues completion vectors according their queue index.
@@ -473,15 +471,12 @@ static int nvme_rdma_create_cq(struct ib_device *ibdev,
comp_vector = (idx == 0 ? idx : idx - 1) % ibdev->num_comp_vectors;
/* Polling queues need direct cq polling context */
- if (nvme_rdma_poll_queue(queue)) {
- poll_ctx = IB_POLL_DIRECT;
+ if (nvme_rdma_poll_queue(queue))
queue->ib_cq = ib_alloc_cq(ibdev, queue, queue->cq_size,
- comp_vector, poll_ctx);
- } else {
- poll_ctx = IB_POLL_SOFTIRQ;
+ comp_vector, IB_POLL_DIRECT);
+ else
queue->ib_cq = ib_cq_pool_get(ibdev, queue->cq_size,
- comp_vector, poll_ctx);
- }
+ comp_vector, IB_POLL_SOFTIRQ);
if (IS_ERR(queue->ib_cq)) {
ret = PTR_ERR(queue->ib_cq);
@@ -2163,10 +2158,8 @@ static void nvme_rdma_map_queues(struct blk_mq_tag_set *set)
ctrl->io_queues[HCTX_TYPE_DEFAULT];
set->map[HCTX_TYPE_READ].queue_offset = 0;
}
- blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_DEFAULT],
- ctrl->device->dev, 0);
- blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_READ],
- ctrl->device->dev, 0);
+ blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
+ blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) {
/* map dedicated poll queues only if we have queues left */
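With blk-mq-rdma gone, the map_queues hook falls back to the generic blk_mq_map_queues(), which spreads CPUs evenly over the hardware contexts instead of following the RDMA device's completion-vector affinity; the CQ hunk likewise passes IB_POLL_DIRECT/IB_POLL_SOFTIRQ straight to ib_alloc_cq()/ib_cq_pool_get() rather than staging them in a local. A sketch of the resulting hook, with an illustrative tag set:

/* Sketch: generic CPU-to-hctx spreading for the default and read maps. */
static void example_map_queues(struct blk_mq_tag_set *set)
{
	blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
	blk_mq_map_queues(&set->map[HCTX_TYPE_READ]);
}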
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 49c9e7bc9116..bf0230442d57 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -888,6 +888,9 @@ static int nvme_tcp_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
size_t consumed = len;
int result;
+ if (unlikely(!queue->rd_enabled))
+ return -EFAULT;
+
while (len) {
switch (nvme_tcp_recv_state(queue)) {
case NVME_TCP_RECV_PDU:
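queue->rd_enabled is cleared once the queue hits a fatal error, so the new guard makes the ->read_sock() callback refuse further socket data instead of walking the receive state machine for a dead queue; the negative return propagates back out of tcp_read_sock(). A sketch of the guard, with illustrative types:

/* Sketch: read_descriptor_t callback that rejects data for a dead queue. */
static int example_recv_skb(read_descriptor_t *desc, struct sk_buff *skb,
			    unsigned int offset, size_t len)
{
	struct example_queue *queue = desc->arg.data;

	if (unlikely(!queue->rd_enabled))
		return -EFAULT;		/* stop feeding a failed queue */

	/* ... the normal PDU receive state machine would run here ... */
	return len;
}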
diff --git a/drivers/nvme/host/trace.h b/drivers/nvme/host/trace.h
index 6f0eaf6a1528..4fb5922ffdac 100644
--- a/drivers/nvme/host/trace.h
+++ b/drivers/nvme/host/trace.h
@@ -127,15 +127,12 @@ TRACE_EVENT(nvme_async_event,
),
TP_printk("nvme%d: NVME_AEN=%#08x [%s]",
__entry->ctrl_id, __entry->result,
- __print_symbolic(__entry->result,
- aer_name(NVME_AER_NOTICE_NS_CHANGED),
- aer_name(NVME_AER_NOTICE_ANA),
- aer_name(NVME_AER_NOTICE_FW_ACT_STARTING),
- aer_name(NVME_AER_NOTICE_DISC_CHANGED),
- aer_name(NVME_AER_ERROR),
- aer_name(NVME_AER_SMART),
- aer_name(NVME_AER_CSS),
- aer_name(NVME_AER_VS))
+ __print_symbolic(__entry->result & 0x7,
+ aer_name(NVME_AER_ERROR),
+ aer_name(NVME_AER_SMART),
+ aer_name(NVME_AER_NOTICE),
+ aer_name(NVME_AER_CSS),
+ aer_name(NVME_AER_VS))
)
);
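The old TP_printk matched the full result dword against a mix of type and notice-subtype constants whose values collide; the rework (paired with the single trace_nvme_async_event() call site in core.c) masks out bits 2:0 first, which per the NVMe spec hold the Asynchronous Event Type, while bits 15:8 carry the event information. A sketch of the field extraction the tracepoint now relies on:

/* Sketch: AEN completion-result decoding (field layout per NVMe spec). */
static inline u32 example_aer_type(u32 result)
{
	return result & 0x7;		/* bits 2:0: Asynchronous Event Type */
}

static inline u32 example_aer_subtype(u32 result)
{
	return (result >> 8) & 0xff;	/* bits 15:8: Event Information */
}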