Diffstat (limited to 'drivers/net/ipa')
-rw-r--r--  drivers/net/ipa/Kconfig             |  10
-rw-r--r--  drivers/net/ipa/gsi.c               | 405
-rw-r--r--  drivers/net/ipa/gsi.h               |   6
-rw-r--r--  drivers/net/ipa/gsi_reg.h           |  31
-rw-r--r--  drivers/net/ipa/gsi_trans.h         |   1
-rw-r--r--  drivers/net/ipa/ipa.h               |   4
-rw-r--r--  drivers/net/ipa/ipa_clock.c         | 199
-rw-r--r--  drivers/net/ipa/ipa_cmd.c           |  77
-rw-r--r--  drivers/net/ipa/ipa_cmd.h           |  24
-rw-r--r--  drivers/net/ipa/ipa_data-sc7180.c   |  38
-rw-r--r--  drivers/net/ipa/ipa_data-sdm845.c   |  38
-rw-r--r--  drivers/net/ipa/ipa_data.h          |  26
-rw-r--r--  drivers/net/ipa/ipa_endpoint.c      | 127
-rw-r--r--  drivers/net/ipa/ipa_main.c          |  43
-rw-r--r--  drivers/net/ipa/ipa_mem.c           |   4
-rw-r--r--  drivers/net/ipa/ipa_modem.c         |   1
-rw-r--r--  drivers/net/ipa/ipa_reg.h           |  22
-rw-r--r--  drivers/net/ipa/ipa_table.c         |  16
-rw-r--r--  drivers/net/ipa/ipa_table.h         |   8
19 files changed, 615 insertions, 465 deletions
diff --git a/drivers/net/ipa/Kconfig b/drivers/net/ipa/Kconfig
index 9f0d2a93379c..b68f1289b89e 100644
--- a/drivers/net/ipa/Kconfig
+++ b/drivers/net/ipa/Kconfig
@@ -1,9 +1,10 @@
config QCOM_IPA
tristate "Qualcomm IPA support"
- depends on ARCH_QCOM && 64BIT && NET
- depends on QCOM_Q6V5_MSS
+ depends on 64BIT && NET && QCOM_SMEM
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on QCOM_RPROC_COMMON || (QCOM_RPROC_COMMON=n && COMPILE_TEST)
+ select QCOM_MDT_LOADER if ARCH_QCOM
select QCOM_QMI_HELPERS
- select QCOM_MDT_LOADER
help
Choose Y or M here to include support for the Qualcomm
IP Accelerator (IPA), a hardware block present in some
@@ -11,7 +12,8 @@ config QCOM_IPA
that is capable of generic hardware handling of IP packets,
including routing, filtering, and NAT. Currently the IPA
driver supports only basic transport of network traffic
- between the AP and modem, on the Qualcomm SDM845 SoC.
+ between the AP and modem, on the Qualcomm SDM845 and SC7180
+ SoCs.
Note that if selected, the selection type must match that
of QCOM_Q6V5_COMMON (Y or M).
diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index 14d9a791924b..390d3403386a 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2021 Linaro Ltd.
*/
#include <linux/types.h>
@@ -89,9 +89,9 @@
/* Delay period for interrupt moderation (in 32KHz IPA internal timer ticks) */
#define GSI_EVT_RING_INT_MODT (32 * 1) /* 1ms under 32KHz clock */
-#define GSI_CMD_TIMEOUT 5 /* seconds */
+#define GSI_CMD_TIMEOUT 50 /* milliseconds */
-#define GSI_CHANNEL_STOP_RX_RETRIES 10
+#define GSI_CHANNEL_STOP_RETRIES 10
#define GSI_CHANNEL_MODEM_HALT_RETRIES 10
#define GSI_MHI_EVENT_ID_START 10 /* 1st reserved event id */
@@ -175,6 +175,12 @@ static u32 gsi_channel_id(struct gsi_channel *channel)
return channel - &channel->gsi->channel[0];
}
+/* An initialized channel has a non-null GSI pointer */
+static bool gsi_channel_initialized(struct gsi_channel *channel)
+{
+ return !!channel->gsi;
+}
+
/* Update the GSI IRQ type register with the cached value */
static void gsi_irq_type_update(struct gsi *gsi, u32 val)
{
@@ -195,8 +201,6 @@ static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id)
/* Turn off all GSI interrupts initially */
static void gsi_irq_setup(struct gsi *gsi)
{
- u32 adjust;
-
/* Disable all interrupt types */
gsi_irq_type_update(gsi, 0);
@@ -206,10 +210,9 @@ static void gsi_irq_setup(struct gsi *gsi)
iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
- /* Reverse the offset adjustment for inter-EE register offsets */
- adjust = gsi->version < IPA_VERSION_4_5 ? 0 : GSI_EE_REG_ADJUST;
- iowrite32(0, gsi->virt + adjust + GSI_INTER_EE_SRC_CH_IRQ_OFFSET);
- iowrite32(0, gsi->virt + adjust + GSI_INTER_EE_SRC_EV_CH_IRQ_OFFSET);
+ /* The inter-EE registers are in the non-adjusted address range */
+ iowrite32(0, gsi->virt_raw + GSI_INTER_EE_SRC_CH_IRQ_OFFSET);
+ iowrite32(0, gsi->virt_raw + GSI_INTER_EE_SRC_EV_CH_IRQ_OFFSET);
iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
}
@@ -220,7 +223,59 @@ static void gsi_irq_teardown(struct gsi *gsi)
/* Nothing to do */
}
-static void gsi_irq_ieob_enable(struct gsi *gsi, u32 evt_ring_id)
+/* Event ring commands are performed one at a time. Their completion
+ * is signaled by the event ring control GSI interrupt type, which is
+ * only enabled when we issue an event ring command. Only the event
+ * ring being operated on has this interrupt enabled.
+ */
+static void gsi_irq_ev_ctrl_enable(struct gsi *gsi, u32 evt_ring_id)
+{
+ u32 val = BIT(evt_ring_id);
+
+ /* There's a small chance that a previous command completed
+ * after the interrupt was disabled, so make sure we have no
+ * pending interrupts before we enable them.
+ */
+ iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);
+
+ iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
+ gsi_irq_type_enable(gsi, GSI_EV_CTRL);
+}
+
+/* Disable event ring control interrupts */
+static void gsi_irq_ev_ctrl_disable(struct gsi *gsi)
+{
+ gsi_irq_type_disable(gsi, GSI_EV_CTRL);
+ iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
+}
+
+/* Channel commands are performed one at a time. Their completion is
+ * signaled by the channel control GSI interrupt type, which is only
+ * enabled when we issue a channel command. Only the channel being
+ * operated on has this interrupt enabled.
+ */
+static void gsi_irq_ch_ctrl_enable(struct gsi *gsi, u32 channel_id)
+{
+ u32 val = BIT(channel_id);
+
+ /* There's a small chance that a previous command completed
+ * after the interrupt was disabled, so make sure we have no
+ * pending interrupts before we enable them.
+ */
+ iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);
+
+ iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
+ gsi_irq_type_enable(gsi, GSI_CH_CTRL);
+}
+
+/* Disable channel control interrupts */
+static void gsi_irq_ch_ctrl_disable(struct gsi *gsi)
+{
+ gsi_irq_type_disable(gsi, GSI_CH_CTRL);
+ iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
+}
+
+static void gsi_irq_ieob_enable_one(struct gsi *gsi, u32 evt_ring_id)
{
bool enable_ieob = !gsi->ieob_enabled_bitmap;
u32 val;
@@ -234,11 +289,11 @@ static void gsi_irq_ieob_enable(struct gsi *gsi, u32 evt_ring_id)
gsi_irq_type_enable(gsi, GSI_IEOB);
}
-static void gsi_irq_ieob_disable(struct gsi *gsi, u32 evt_ring_id)
+static void gsi_irq_ieob_disable(struct gsi *gsi, u32 event_mask)
{
u32 val;
- gsi->ieob_enabled_bitmap &= ~BIT(evt_ring_id);
+ gsi->ieob_enabled_bitmap &= ~event_mask;
/* Disable the interrupt type if this was the last enabled channel */
if (!gsi->ieob_enabled_bitmap)
@@ -248,6 +303,11 @@ static void gsi_irq_ieob_disable(struct gsi *gsi, u32 evt_ring_id)
iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
}
+static void gsi_irq_ieob_disable_one(struct gsi *gsi, u32 evt_ring_id)
+{
+ gsi_irq_ieob_disable(gsi, BIT(evt_ring_id));
+}
+
/* Enable all GSI interrupt types */
static void gsi_irq_enable(struct gsi *gsi)
{
@@ -307,11 +367,13 @@ static u32 gsi_ring_index(struct gsi_ring *ring, u32 offset)
static bool
gsi_command(struct gsi *gsi, u32 reg, u32 val, struct completion *completion)
{
+ unsigned long timeout = msecs_to_jiffies(GSI_CMD_TIMEOUT);
+
reinit_completion(completion);
iowrite32(val, gsi->virt + reg);
- return !!wait_for_completion_timeout(completion, GSI_CMD_TIMEOUT * HZ);
+ return !!wait_for_completion_timeout(completion, timeout);
}
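The command timeout above is now interpreted in milliseconds and converted to jiffies at the single wait site. A minimal sketch of the arithmetic behind the change follows; the function name is hypothetical and the jiffy counts assume HZ == 100:

#include <linux/jiffies.h>

/* Sketch: effect of redefining GSI_CMD_TIMEOUT.  The old value 5 was
 * multiplied by HZ (seconds); the new value 50 is passed through
 * msecs_to_jiffies() (milliseconds).  Numbers assume HZ == 100.
 */
static unsigned long example_cmd_timeout_jiffies(void)
{
        unsigned long old_wait = 5 * HZ;                /* 500 jiffies = 5 s */
        unsigned long new_wait = msecs_to_jiffies(50);  /*   5 jiffies = 50 ms */

        /* Always new_wait: command waits are now 100x shorter */
        return new_wait < old_wait ? new_wait : old_wait;
}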
/* Return the hardware's notion of the current state of an event ring */
@@ -326,68 +388,54 @@ gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
}
/* Issue an event ring command and wait for it to complete */
-static void evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
- enum gsi_evt_cmd_opcode opcode)
+static void gsi_evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
+ enum gsi_evt_cmd_opcode opcode)
{
struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
struct completion *completion = &evt_ring->completion;
struct device *dev = gsi->dev;
- bool success;
+ bool timeout;
u32 val;
- /* We only perform one event ring command at a time, and event
- * control interrupts should only occur when such a command
- * is issued here. Only permit *this* event ring to trigger
- * an interrupt, and only enable the event control IRQ type
- * when we expect it to occur.
- *
- * There's a small chance that a previous command completed
- * after the interrupt was disabled, so make sure we have no
- * pending interrupts before we enable them.
- */
- iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);
-
- val = BIT(evt_ring_id);
- iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
- gsi_irq_type_enable(gsi, GSI_EV_CTRL);
+ /* Enable the completion interrupt for the command */
+ gsi_irq_ev_ctrl_enable(gsi, evt_ring_id);
val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
val |= u32_encode_bits(opcode, EV_OPCODE_FMASK);
- success = gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion);
+ timeout = !gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion);
- /* Disable the interrupt again */
- gsi_irq_type_disable(gsi, GSI_EV_CTRL);
- iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
+ gsi_irq_ev_ctrl_disable(gsi);
- if (success)
+ if (!timeout)
return;
dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n",
- opcode, evt_ring_id, evt_ring->state);
+ opcode, evt_ring_id, gsi_evt_ring_state(gsi, evt_ring_id));
}
/* Allocate an event ring in NOT_ALLOCATED state */
static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
- struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
+ enum gsi_evt_ring_state state;
/* Get initial event ring state */
- evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);
- if (evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
+ state = gsi_evt_ring_state(gsi, evt_ring_id);
+ if (state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
dev_err(gsi->dev, "event ring %u bad state %u before alloc\n",
- evt_ring_id, evt_ring->state);
+ evt_ring_id, state);
return -EINVAL;
}
- evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);
+ gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);
/* If successful the event ring state will have changed */
- if (evt_ring->state == GSI_EVT_RING_STATE_ALLOCATED)
+ state = gsi_evt_ring_state(gsi, evt_ring_id);
+ if (state == GSI_EVT_RING_STATE_ALLOCATED)
return 0;
dev_err(gsi->dev, "event ring %u bad state %u after alloc\n",
- evt_ring_id, evt_ring->state);
+ evt_ring_id, state);
return -EIO;
}
@@ -395,52 +443,55 @@ static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
/* Reset a GSI event ring in ALLOCATED or ERROR state. */
static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
{
- struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
- enum gsi_evt_ring_state state = evt_ring->state;
+ enum gsi_evt_ring_state state;
+ state = gsi_evt_ring_state(gsi, evt_ring_id);
if (state != GSI_EVT_RING_STATE_ALLOCATED &&
state != GSI_EVT_RING_STATE_ERROR) {
dev_err(gsi->dev, "event ring %u bad state %u before reset\n",
- evt_ring_id, evt_ring->state);
+ evt_ring_id, state);
return;
}
- evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);
+ gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);
/* If successful the event ring state will have changed */
- if (evt_ring->state == GSI_EVT_RING_STATE_ALLOCATED)
+ state = gsi_evt_ring_state(gsi, evt_ring_id);
+ if (state == GSI_EVT_RING_STATE_ALLOCATED)
return;
dev_err(gsi->dev, "event ring %u bad state %u after reset\n",
- evt_ring_id, evt_ring->state);
+ evt_ring_id, state);
}
/* Issue a hardware de-allocation request for an allocated event ring */
static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
- struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
+ enum gsi_evt_ring_state state;
- if (evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
+ state = gsi_evt_ring_state(gsi, evt_ring_id);
+ if (state != GSI_EVT_RING_STATE_ALLOCATED) {
dev_err(gsi->dev, "event ring %u state %u before dealloc\n",
- evt_ring_id, evt_ring->state);
+ evt_ring_id, state);
return;
}
- evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);
+ gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);
/* If successful the event ring state will have changed */
- if (evt_ring->state == GSI_EVT_RING_STATE_NOT_ALLOCATED)
+ state = gsi_evt_ring_state(gsi, evt_ring_id);
+ if (state == GSI_EVT_RING_STATE_NOT_ALLOCATED)
return;
dev_err(gsi->dev, "event ring %u bad state %u after dealloc\n",
- evt_ring_id, evt_ring->state);
+ evt_ring_id, state);
}
/* Fetch the current state of a channel from hardware */
static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
{
u32 channel_id = gsi_channel_id(channel);
- void *virt = channel->gsi->virt;
+ void __iomem *virt = channel->gsi->virt;
u32 val;
val = ioread32(virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));
@@ -456,34 +507,19 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
u32 channel_id = gsi_channel_id(channel);
struct gsi *gsi = channel->gsi;
struct device *dev = gsi->dev;
- bool success;
+ bool timeout;
u32 val;
- /* We only perform one channel command at a time, and channel
- * control interrupts should only occur when such a command is
- * issued here. So we only permit *this* channel to trigger
- * an interrupt and only enable the channel control IRQ type
- * when we expect it to occur.
- *
- * There's a small chance that a previous command completed
- * after the interrupt was disabled, so make sure we have no
- * pending interrupts before we enable them.
- */
- iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);
-
- val = BIT(channel_id);
- iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
- gsi_irq_type_enable(gsi, GSI_CH_CTRL);
+ /* Enable the completion interrupt for the command */
+ gsi_irq_ch_ctrl_enable(gsi, channel_id);
val = u32_encode_bits(channel_id, CH_CHID_FMASK);
val |= u32_encode_bits(opcode, CH_OPCODE_FMASK);
- success = gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion);
+ timeout = !gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion);
- /* Disable the interrupt again */
- gsi_irq_type_disable(gsi, GSI_CH_CTRL);
- iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
+ gsi_irq_ch_ctrl_disable(gsi);
- if (success)
+ if (!timeout)
return;
dev_err(dev, "GSI command %u for channel %u timed out, state %u\n",
@@ -589,7 +625,8 @@ static void gsi_channel_reset_command(struct gsi_channel *channel)
struct device *dev = channel->gsi->dev;
enum gsi_channel_state state;
- msleep(1); /* A short delay is required before a RESET command */
+ /* A short delay is required before a RESET command */
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
state = gsi_channel_state(channel);
if (state != GSI_CHANNEL_STATE_STOPPED &&
@@ -695,22 +732,38 @@ static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
gsi_evt_ring_doorbell(gsi, evt_ring_id, 0);
}
-/* Return the last (most recent) transaction completed on a channel. */
+/* Find the transaction whose completion indicates a channel is quiesced */
static struct gsi_trans *gsi_channel_trans_last(struct gsi_channel *channel)
{
struct gsi_trans_info *trans_info = &channel->trans_info;
+ const struct list_head *list;
struct gsi_trans *trans;
spin_lock_bh(&trans_info->spinlock);
- if (!list_empty(&trans_info->complete))
- trans = list_last_entry(&trans_info->complete,
- struct gsi_trans, links);
- else if (!list_empty(&trans_info->polled))
- trans = list_last_entry(&trans_info->polled,
- struct gsi_trans, links);
- else
- trans = NULL;
+ /* There is a small chance a TX transaction got allocated just
+ * before we disabled transmits, so check for that.
+ */
+ if (channel->toward_ipa) {
+ list = &trans_info->alloc;
+ if (!list_empty(list))
+ goto done;
+ list = &trans_info->pending;
+ if (!list_empty(list))
+ goto done;
+ }
+
+ /* Otherwise (TX or RX) we want to wait for anything that
+ * has completed, or has been polled but not released yet.
+ */
+ list = &trans_info->complete;
+ if (!list_empty(list))
+ goto done;
+ list = &trans_info->polled;
+ if (list_empty(list))
+ list = NULL;
+done:
+ trans = list ? list_last_entry(list, struct gsi_trans, links) : NULL;
/* Caller will wait for this, so take a reference */
if (trans)
@@ -734,24 +787,6 @@ static void gsi_channel_trans_quiesce(struct gsi_channel *channel)
}
}
-/* Stop channel activity. Transactions may not be allocated until thawed. */
-static void gsi_channel_freeze(struct gsi_channel *channel)
-{
- gsi_channel_trans_quiesce(channel);
-
- napi_disable(&channel->napi);
-
- gsi_irq_ieob_disable(channel->gsi, channel->evt_ring_id);
-}
-
-/* Allow transactions to be used on the channel again. */
-static void gsi_channel_thaw(struct gsi_channel *channel)
-{
- gsi_irq_ieob_enable(channel->gsi, channel->evt_ring_id);
-
- napi_enable(&channel->napi);
-}
-
/* Program a channel for use */
static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
{
@@ -843,51 +878,92 @@ static void gsi_channel_deprogram(struct gsi_channel *channel)
/* Nothing to do */
}
-/* Start an allocated GSI channel */
-int gsi_channel_start(struct gsi *gsi, u32 channel_id)
+static int __gsi_channel_start(struct gsi_channel *channel, bool start)
{
- struct gsi_channel *channel = &gsi->channel[channel_id];
+ struct gsi *gsi = channel->gsi;
int ret;
+ if (!start)
+ return 0;
+
mutex_lock(&gsi->mutex);
ret = gsi_channel_start_command(channel);
mutex_unlock(&gsi->mutex);
- gsi_channel_thaw(channel);
-
return ret;
}
-/* Stop a started channel */
-int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
+/* Start an allocated GSI channel */
+int gsi_channel_start(struct gsi *gsi, u32 channel_id)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
- u32 retries;
int ret;
- gsi_channel_freeze(channel);
+ /* Enable NAPI and the completion interrupt */
+ napi_enable(&channel->napi);
+ gsi_irq_ieob_enable_one(gsi, channel->evt_ring_id);
- /* RX channels might require a little time to enter STOPPED state */
- retries = channel->toward_ipa ? 0 : GSI_CHANNEL_STOP_RX_RETRIES;
+ ret = __gsi_channel_start(channel, true);
+ if (ret) {
+ gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id);
+ napi_disable(&channel->napi);
+ }
- mutex_lock(&gsi->mutex);
+ return ret;
+}
+
+static int gsi_channel_stop_retry(struct gsi_channel *channel)
+{
+ u32 retries = GSI_CHANNEL_STOP_RETRIES;
+ int ret;
do {
ret = gsi_channel_stop_command(channel);
if (ret != -EAGAIN)
break;
- msleep(1);
+ usleep_range(3 * USEC_PER_MSEC, 5 * USEC_PER_MSEC);
} while (retries--);
+ return ret;
+}
+
+static int __gsi_channel_stop(struct gsi_channel *channel, bool stop)
+{
+ struct gsi *gsi = channel->gsi;
+ int ret;
+
+ /* Wait for any underway transactions to complete before stopping. */
+ gsi_channel_trans_quiesce(channel);
+
+ if (!stop)
+ return 0;
+
+ mutex_lock(&gsi->mutex);
+
+ ret = gsi_channel_stop_retry(channel);
+
mutex_unlock(&gsi->mutex);
- /* Thaw the channel if we need to retry (or on error) */
+ return ret;
+}
+
+/* Stop a started channel */
+int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
+{
+ struct gsi_channel *channel = &gsi->channel[channel_id];
+ int ret;
+
+ ret = __gsi_channel_stop(channel, true);
if (ret)
- gsi_channel_thaw(channel);
+ return ret;
- return ret;
+ /* Disable the completion interrupt and NAPI if successful */
+ gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id);
+ napi_disable(&channel->napi);
+
+ return 0;
}
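The stop retry loop above (and the pre-reset delay earlier in this file) switches from msleep(1) to usleep_range(). A brief sketch of why, based only on the documented kernel sleep guidelines (Documentation/timers/timers-howto.rst); the function is illustrative, not driver code:

#include <linux/delay.h>

/* Sketch: msleep() rounds up to jiffies, so msleep(1) can sleep for
 * roughly 10-20 ms on a HZ=100 kernel; usleep_range() uses hrtimers
 * and keeps the delay within the requested window.
 */
static void example_short_delay(void)
{
        /* Old style: may oversleep by an order of magnitude */
        msleep(1);

        /* New style: sleep between 3 and 5 ms, as the stop retry does */
        usleep_range(3 * USEC_PER_MSEC, 5 * USEC_PER_MSEC);
}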
/* Reset and reconfigure a channel, (possibly) enabling the doorbell engine */
@@ -912,11 +988,14 @@ void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell)
int gsi_channel_suspend(struct gsi *gsi, u32 channel_id, bool stop)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
+ int ret;
- if (stop)
- return gsi_channel_stop(gsi, channel_id);
+ ret = __gsi_channel_stop(channel, stop);
+ if (ret)
+ return ret;
- gsi_channel_freeze(channel);
+ /* Ensure NAPI polling has finished. */
+ napi_synchronize(&channel->napi);
return 0;
}
@@ -926,12 +1005,7 @@ int gsi_channel_resume(struct gsi *gsi, u32 channel_id, bool start)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
- if (start)
- return gsi_channel_start(gsi, channel_id);
-
- gsi_channel_thaw(channel);
-
- return 0;
+ return __gsi_channel_start(channel, start);
}
/**
@@ -1040,7 +1114,6 @@ static void gsi_isr_evt_ctrl(struct gsi *gsi)
event_mask ^= BIT(evt_ring_id);
evt_ring = &gsi->evt_ring[evt_ring_id];
- evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);
complete(&evt_ring->completion);
}
@@ -1178,6 +1251,7 @@ static void gsi_isr_ieob(struct gsi *gsi)
u32 event_mask;
event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_OFFSET);
+ gsi_irq_ieob_disable(gsi, event_mask);
iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET);
while (event_mask) {
@@ -1185,7 +1259,6 @@ static void gsi_isr_ieob(struct gsi *gsi)
event_mask ^= BIT(evt_ring_id);
- gsi_irq_ieob_disable(gsi, evt_ring_id);
napi_schedule(&gsi->evt_ring[evt_ring_id].channel->napi);
}
}
@@ -1373,7 +1446,7 @@ static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
/* Hardware requires a 2^n ring size, with alignment equal to size */
ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
if (ring->virt && addr % size) {
- dma_free_coherent(dev, size, ring->virt, ring->addr);
+ dma_free_coherent(dev, size, ring->virt, addr);
dev_err(dev, "unable to alloc 0x%zx-aligned ring buffer\n",
size);
return -EINVAL; /* Not a good error value, but distinct */
@@ -1430,7 +1503,7 @@ void gsi_channel_doorbell(struct gsi_channel *channel)
}
/* Consult hardware, move any newly completed transactions to completed list */
-static void gsi_channel_update(struct gsi_channel *channel)
+static struct gsi_trans *gsi_channel_update(struct gsi_channel *channel)
{
u32 evt_ring_id = channel->evt_ring_id;
struct gsi *gsi = channel->gsi;
@@ -1449,7 +1522,7 @@ static void gsi_channel_update(struct gsi_channel *channel)
offset = GSI_EV_CH_E_CNTXT_4_OFFSET(evt_ring_id);
index = gsi_ring_index(ring, ioread32(gsi->virt + offset));
if (index == ring->index % ring->count)
- return;
+ return NULL;
/* Get the transaction for the latest completed event. Take a
* reference to keep it from completing before we give the events
@@ -1474,6 +1547,8 @@ static void gsi_channel_update(struct gsi_channel *channel)
gsi_evt_ring_doorbell(channel->gsi, channel->evt_ring_id, index);
gsi_trans_free(trans);
+
+ return gsi_channel_trans_complete(channel);
}
/**
@@ -1494,11 +1569,8 @@ static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel)
/* Get the first transaction from the completed list */
trans = gsi_channel_trans_complete(channel);
- if (!trans) {
- /* List is empty; see if there's more to do */
- gsi_channel_update(channel);
- trans = gsi_channel_trans_complete(channel);
- }
+ if (!trans) /* List is empty; see if there's more to do */
+ trans = gsi_channel_update(channel);
if (trans)
gsi_trans_move_polled(trans);
@@ -1521,23 +1593,20 @@ static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel)
static int gsi_channel_poll(struct napi_struct *napi, int budget)
{
struct gsi_channel *channel;
- int count = 0;
+ int count;
channel = container_of(napi, struct gsi_channel, napi);
- while (count < budget) {
+ for (count = 0; count < budget; count++) {
struct gsi_trans *trans;
- count++;
trans = gsi_channel_poll_one(channel);
if (!trans)
break;
gsi_trans_complete(trans);
}
- if (count < budget) {
- napi_complete(&channel->napi);
- gsi_irq_ieob_enable(channel->gsi, channel->evt_ring_id);
- }
+ if (count < budget && napi_complete(napi))
+ gsi_irq_ieob_enable_one(channel->gsi, channel->evt_ring_id);
return count;
}
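The rewritten poll function relies on the standard NAPI contract: returning a count equal to budget keeps the poller scheduled, and the channel interrupt is re-armed only when napi_complete() confirms polling actually ended. A minimal sketch of that pattern; example_process_one() and example_reenable_interrupt() are hypothetical stand-ins, not driver functions:

#include <linux/netdevice.h>

static bool example_process_one(struct napi_struct *napi);
static void example_reenable_interrupt(struct napi_struct *napi);

static int example_poll(struct napi_struct *napi, int budget)
{
        int count = 0;

        /* Process up to budget completions */
        while (count < budget && example_process_one(napi))
                count++;

        /* If all pending work was done (count < budget), try to stop
         * polling; napi_complete() returns true only if polling really
         * ended, and only then is it safe to re-enable the interrupt.
         */
        if (count < budget && napi_complete(napi))
                example_reenable_interrupt(napi);

        return count;
}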
@@ -1575,8 +1644,8 @@ static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id)
u32 evt_ring_id = channel->evt_ring_id;
int ret;
- if (!channel->gsi)
- return 0; /* Ignore uninitialized channels */
+ if (!gsi_channel_initialized(channel))
+ return 0;
ret = gsi_evt_ring_alloc_command(gsi, evt_ring_id);
if (ret)
@@ -1612,8 +1681,8 @@ static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id)
struct gsi_channel *channel = &gsi->channel[channel_id];
u32 evt_ring_id = channel->evt_ring_id;
- if (!channel->gsi)
- return; /* Ignore uninitialized channels */
+ if (!gsi_channel_initialized(channel))
+ return;
netif_napi_del(&channel->napi);
@@ -1627,7 +1696,7 @@ static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
enum gsi_generic_cmd_opcode opcode)
{
struct completion *completion = &gsi->completion;
- bool success;
+ bool timeout;
u32 val;
/* The error global interrupt type is always enabled (until we
@@ -1650,12 +1719,12 @@ static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);
- success = gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion);
+ timeout = !gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion);
/* Disable the GP_INT1 IRQ type again */
iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
- if (success)
+ if (!timeout)
return gsi->result;
dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n",
@@ -1707,9 +1776,10 @@ static int gsi_channel_setup(struct gsi *gsi)
while (channel_id < GSI_CHANNEL_COUNT_MAX) {
struct gsi_channel *channel = &gsi->channel[channel_id++];
- if (!channel->gsi)
- continue; /* Ignore uninitialized channels */
+ if (!gsi_channel_initialized(channel))
+ continue;
+ ret = -EINVAL;
dev_err(gsi->dev, "channel %u not supported by hardware\n",
channel_id - 1);
channel_id = gsi->channel_count;
@@ -2025,8 +2095,8 @@ err_clear_gsi:
/* Inverse of gsi_channel_init_one() */
static void gsi_channel_exit_one(struct gsi_channel *channel)
{
- if (!channel->gsi)
- return; /* Ignore uninitialized channels */
+ if (!gsi_channel_initialized(channel))
+ return;
if (channel->command)
ipa_cmd_pool_exit(channel);
@@ -2114,9 +2184,8 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev,
gsi->dev = dev;
gsi->version = version;
- /* The GSI layer performs NAPI on all endpoints. NAPI requires a
- * network device structure, but the GSI layer does not have one,
- * so we must create a dummy network device for this purpose.
+ /* GSI uses NAPI on all channels. Create a dummy network device
+ * for the channel NAPI contexts to be associated with.
*/
init_dummy_netdev(&gsi->dummy_dev);
@@ -2141,13 +2210,13 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev,
return -EINVAL;
}
- gsi->virt = ioremap(res->start, size);
- if (!gsi->virt) {
+ gsi->virt_raw = ioremap(res->start, size);
+ if (!gsi->virt_raw) {
dev_err(dev, "unable to remap \"gsi\" memory\n");
return -ENOMEM;
}
- /* Adjust register range pointer downward for newer IPA versions */
- gsi->virt -= adjust;
+ /* Most registers are accessed using an adjusted register range */
+ gsi->virt = gsi->virt_raw - adjust;
init_completion(&gsi->completion);
@@ -2166,7 +2235,7 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev,
err_irq_exit:
gsi_irq_exit(gsi);
err_iounmap:
- iounmap(gsi->virt);
+ iounmap(gsi->virt_raw);
return ret;
}
@@ -2177,7 +2246,7 @@ void gsi_exit(struct gsi *gsi)
mutex_destroy(&gsi->mutex);
gsi_channel_exit(gsi);
gsi_irq_exit(gsi);
- iounmap(gsi->virt);
+ iounmap(gsi->virt_raw);
}
/* The maximum number of outstanding TREs on a channel. This limits
diff --git a/drivers/net/ipa/gsi.h b/drivers/net/ipa/gsi.h
index 96c9aed397aa..efc980f96109 100644
--- a/drivers/net/ipa/gsi.h
+++ b/drivers/net/ipa/gsi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2021 Linaro Ltd.
*/
#ifndef _GSI_H_
#define _GSI_H_
@@ -142,7 +142,6 @@ enum gsi_evt_ring_state {
struct gsi_evt_ring {
struct gsi_channel *channel;
struct completion completion; /* signals event ring state changes */
- enum gsi_evt_ring_state state;
struct gsi_ring ring;
};
@@ -150,7 +149,8 @@ struct gsi {
struct device *dev; /* Same as IPA device */
enum ipa_version version;
struct net_device dummy_dev; /* needed for NAPI */
- void __iomem *virt;
+ void __iomem *virt_raw; /* I/O mapped address range */
+ void __iomem *virt; /* Adjusted for most registers */
u32 irq;
u32 channel_count;
u32 evt_ring_count;
diff --git a/drivers/net/ipa/gsi_reg.h b/drivers/net/ipa/gsi_reg.h
index 0e138bbd8205..1622d8cf8dea 100644
--- a/drivers/net/ipa/gsi_reg.h
+++ b/drivers/net/ipa/gsi_reg.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2021 Linaro Ltd.
*/
#ifndef _GSI_REG_H_
#define _GSI_REG_H_
@@ -38,17 +38,21 @@
* (though the actual limit is hardware-dependent).
*/
-/* GSI EE registers as a group are shifted downward by a fixed
- * constant amount for IPA versions 4.5 and beyond. This applies
- * to all GSI registers we use *except* the ones that disable
- * inter-EE interrupts for channels and event channels.
+/* GSI EE registers as a group are shifted downward by a fixed constant amount
+ * for IPA versions 4.5 and beyond. This applies to all GSI registers we use
+ * *except* the ones that disable inter-EE interrupts for channels and event
+ * channels.
*
- * We handle this by adjusting the pointer to the mapped GSI memory
- * region downward. Then in the one place we use them (gsi_irq_setup())
- * we undo that adjustment for the inter-EE interrupt registers.
+ * The "raw" (not adjusted) GSI register range is mapped, and a pointer to
+ * the mapped range is held in gsi->virt_raw. The inter-EE interrupt
+ * registers are accessed using that pointer.
+ *
+ * Most registers are accessed using gsi->virt, which is a copy of the "raw"
+ * pointer, adjusted downward by the fixed amount.
*/
#define GSI_EE_REG_ADJUST 0x0000d000 /* IPA v4.5+ */
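A short sketch of the addressing scheme described above; it only restates what gsi_init() and gsi_irq_setup() do in this patch (error handling trimmed), using register offsets that appear elsewhere in the diff:

static int example_map_gsi_registers(struct gsi *gsi, phys_addr_t start,
                                     size_t size)
{
        /* Registers moved down by GSI_EE_REG_ADJUST on IPA v4.5+ */
        u32 adjust = gsi->version < IPA_VERSION_4_5 ? 0 : GSI_EE_REG_ADJUST;

        gsi->virt_raw = ioremap(start, size);   /* the raw mapped range */
        if (!gsi->virt_raw)
                return -ENOMEM;
        gsi->virt = gsi->virt_raw - adjust;     /* used for most registers */

        /* The two inter-EE IRQ mask registers use the raw pointer... */
        iowrite32(0, gsi->virt_raw + GSI_INTER_EE_SRC_CH_IRQ_OFFSET);

        /* ...every other offset is applied to the adjusted pointer, so
         * pre-v4.5 offset definitions still land on the right address.
         */
        iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);

        return 0;
}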
+/* The two inter-EE IRQ register offsets are relative to gsi->virt_raw */
#define GSI_INTER_EE_SRC_CH_IRQ_OFFSET \
GSI_INTER_EE_N_SRC_CH_IRQ_OFFSET(GSI_EE_AP)
#define GSI_INTER_EE_N_SRC_CH_IRQ_OFFSET(ee) \
@@ -59,16 +63,7 @@
#define GSI_INTER_EE_N_SRC_EV_CH_IRQ_OFFSET(ee) \
(0x0000c01c + 0x1000 * (ee))
-#define GSI_INTER_EE_SRC_CH_IRQ_CLR_OFFSET \
- GSI_INTER_EE_N_SRC_CH_IRQ_CLR_OFFSET(GSI_EE_AP)
-#define GSI_INTER_EE_N_SRC_CH_IRQ_CLR_OFFSET(ee) \
- (0x0000c028 + 0x1000 * (ee))
-
-#define GSI_INTER_EE_SRC_EV_CH_IRQ_CLR_OFFSET \
- GSI_INTER_EE_N_SRC_EV_CH_IRQ_CLR_OFFSET(GSI_EE_AP)
-#define GSI_INTER_EE_N_SRC_EV_CH_IRQ_CLR_OFFSET(ee) \
- (0x0000c02c + 0x1000 * (ee))
-
+/* All other register offsets are relative to gsi->virt */
#define GSI_CH_C_CNTXT_0_OFFSET(ch) \
GSI_EE_N_CH_C_CNTXT_0_OFFSET((ch), GSI_EE_AP)
#define GSI_EE_N_CH_C_CNTXT_0_OFFSET(ch, ee) \
diff --git a/drivers/net/ipa/gsi_trans.h b/drivers/net/ipa/gsi_trans.h
index 4d4606b5fa95..3a4ab8a94d82 100644
--- a/drivers/net/ipa/gsi_trans.h
+++ b/drivers/net/ipa/gsi_trans.h
@@ -13,6 +13,7 @@
#include "ipa_cmd.h"
+struct page;
struct scatterlist;
struct device;
struct sk_buff;
diff --git a/drivers/net/ipa/ipa.h b/drivers/net/ipa/ipa.h
index 6c2371084c55..802077631371 100644
--- a/drivers/net/ipa/ipa.h
+++ b/drivers/net/ipa/ipa.h
@@ -43,7 +43,7 @@ enum ipa_flag {
* @flags: Boolean state flags
* @version: IPA hardware version
* @pdev: Platform device
- * @modem_rproc: Remoteproc handle for modem subsystem
+ * @completion: Used to signal pipeline clear transfer complete
* @smp2p: SMP2P information
* @clock: IPA clocking information
* @table_addr: DMA address of filter/route table content
@@ -83,7 +83,7 @@ struct ipa {
DECLARE_BITMAP(flags, IPA_FLAG_COUNT);
enum ipa_version version;
struct platform_device *pdev;
- struct rproc *modem_rproc;
+ struct completion completion;
struct notifier_block nb;
void *notifier;
struct ipa_smp2p *smp2p;
diff --git a/drivers/net/ipa/ipa_clock.c b/drivers/net/ipa/ipa_clock.c
index 135c393437f1..69ef6ea41e61 100644
--- a/drivers/net/ipa/ipa_clock.c
+++ b/drivers/net/ipa/ipa_clock.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2021 Linaro Ltd.
*/
#include <linux/refcount.h>
@@ -31,142 +31,154 @@
*/
/**
+ * struct ipa_interconnect - IPA interconnect information
+ * @path: Interconnect path
+ * @average_bandwidth: Average interconnect bandwidth (KB/second)
+ * @peak_bandwidth: Peak interconnect bandwidth (KB/second)
+ */
+struct ipa_interconnect {
+ struct icc_path *path;
+ u32 average_bandwidth;
+ u32 peak_bandwidth;
+};
+
+/**
* struct ipa_clock - IPA clocking information
* @count: Clocking reference count
* @mutex: Protects clock enable/disable
* @core: IPA core clock
- * @memory_path: Memory interconnect
- * @imem_path: Internal memory interconnect
- * @config_path: Configuration space interconnect
- * @interconnect_data: Interconnect configuration data
+ * @interconnect_count: Number of elements in interconnect[]
+ * @interconnect: Interconnect array
*/
struct ipa_clock {
refcount_t count;
struct mutex mutex; /* protects clock enable/disable */
struct clk *core;
- struct icc_path *memory_path;
- struct icc_path *imem_path;
- struct icc_path *config_path;
- const struct ipa_interconnect_data *interconnect_data;
+ u32 interconnect_count;
+ struct ipa_interconnect *interconnect;
};
-static struct icc_path *
-ipa_interconnect_init_one(struct device *dev, const char *name)
+static int ipa_interconnect_init_one(struct device *dev,
+ struct ipa_interconnect *interconnect,
+ const struct ipa_interconnect_data *data)
{
struct icc_path *path;
- path = of_icc_get(dev, name);
- if (IS_ERR(path))
- dev_err(dev, "error %ld getting %s interconnect\n",
- PTR_ERR(path), name);
+ path = of_icc_get(dev, data->name);
+ if (IS_ERR(path)) {
+ int ret = PTR_ERR(path);
+
+ dev_err_probe(dev, ret, "error getting %s interconnect\n",
+ data->name);
- return path;
+ return ret;
+ }
+
+ interconnect->path = path;
+ interconnect->average_bandwidth = data->average_bandwidth;
+ interconnect->peak_bandwidth = data->peak_bandwidth;
+
+ return 0;
}
-/* Initialize interconnects required for IPA operation */
-static int ipa_interconnect_init(struct ipa_clock *clock, struct device *dev)
+static void ipa_interconnect_exit_one(struct ipa_interconnect *interconnect)
{
- struct icc_path *path;
-
- path = ipa_interconnect_init_one(dev, "memory");
- if (IS_ERR(path))
- goto err_return;
- clock->memory_path = path;
+ icc_put(interconnect->path);
+ memset(interconnect, 0, sizeof(*interconnect));
+}
- path = ipa_interconnect_init_one(dev, "imem");
- if (IS_ERR(path))
- goto err_memory_path_put;
- clock->imem_path = path;
+/* Initialize interconnects required for IPA operation */
+static int ipa_interconnect_init(struct ipa_clock *clock, struct device *dev,
+ const struct ipa_interconnect_data *data)
+{
+ struct ipa_interconnect *interconnect;
+ u32 count;
+ int ret;
- path = ipa_interconnect_init_one(dev, "config");
- if (IS_ERR(path))
- goto err_imem_path_put;
- clock->config_path = path;
+ count = clock->interconnect_count;
+ interconnect = kcalloc(count, sizeof(*interconnect), GFP_KERNEL);
+ if (!interconnect)
+ return -ENOMEM;
+ clock->interconnect = interconnect;
+
+ while (count--) {
+ ret = ipa_interconnect_init_one(dev, interconnect, data++);
+ if (ret)
+ goto out_unwind;
+ interconnect++;
+ }
return 0;
-err_imem_path_put:
- icc_put(clock->imem_path);
-err_memory_path_put:
- icc_put(clock->memory_path);
-err_return:
- return PTR_ERR(path);
+out_unwind:
+ while (interconnect-- > clock->interconnect)
+ ipa_interconnect_exit_one(interconnect);
+ kfree(clock->interconnect);
+ clock->interconnect = NULL;
+
+ return ret;
}
/* Inverse of ipa_interconnect_init() */
static void ipa_interconnect_exit(struct ipa_clock *clock)
{
- icc_put(clock->config_path);
- icc_put(clock->imem_path);
- icc_put(clock->memory_path);
+ struct ipa_interconnect *interconnect;
+
+ interconnect = clock->interconnect + clock->interconnect_count;
+ while (interconnect-- > clock->interconnect)
+ ipa_interconnect_exit_one(interconnect);
+ kfree(clock->interconnect);
+ clock->interconnect = NULL;
}
/* Currently we only use one bandwidth level, so just "enable" interconnects */
static int ipa_interconnect_enable(struct ipa *ipa)
{
- const struct ipa_interconnect_data *data;
+ struct ipa_interconnect *interconnect;
struct ipa_clock *clock = ipa->clock;
int ret;
-
- data = &clock->interconnect_data[IPA_INTERCONNECT_MEMORY];
- ret = icc_set_bw(clock->memory_path, data->average_rate,
- data->peak_rate);
- if (ret)
- return ret;
-
- data = &clock->interconnect_data[IPA_INTERCONNECT_IMEM];
- ret = icc_set_bw(clock->imem_path, data->average_rate,
- data->peak_rate);
- if (ret)
- goto err_memory_path_disable;
-
- data = &clock->interconnect_data[IPA_INTERCONNECT_CONFIG];
- ret = icc_set_bw(clock->config_path, data->average_rate,
- data->peak_rate);
- if (ret)
- goto err_imem_path_disable;
+ u32 i;
+
+ interconnect = clock->interconnect;
+ for (i = 0; i < clock->interconnect_count; i++) {
+ ret = icc_set_bw(interconnect->path,
+ interconnect->average_bandwidth,
+ interconnect->peak_bandwidth);
+ if (ret)
+ goto out_unwind;
+ interconnect++;
+ }
return 0;
-err_imem_path_disable:
- (void)icc_set_bw(clock->imem_path, 0, 0);
-err_memory_path_disable:
- (void)icc_set_bw(clock->memory_path, 0, 0);
+out_unwind:
+ while (interconnect-- > clock->interconnect)
+ (void)icc_set_bw(interconnect->path, 0, 0);
return ret;
}
/* To disable an interconnect, we just set its bandwidth to 0 */
-static int ipa_interconnect_disable(struct ipa *ipa)
+static void ipa_interconnect_disable(struct ipa *ipa)
{
- const struct ipa_interconnect_data *data;
+ struct ipa_interconnect *interconnect;
struct ipa_clock *clock = ipa->clock;
+ int result = 0;
+ u32 count;
int ret;
- ret = icc_set_bw(clock->memory_path, 0, 0);
- if (ret)
- return ret;
-
- ret = icc_set_bw(clock->imem_path, 0, 0);
- if (ret)
- goto err_memory_path_reenable;
-
- ret = icc_set_bw(clock->config_path, 0, 0);
- if (ret)
- goto err_imem_path_reenable;
-
- return 0;
-
-err_imem_path_reenable:
- data = &clock->interconnect_data[IPA_INTERCONNECT_IMEM];
- (void)icc_set_bw(clock->imem_path, data->average_rate,
- data->peak_rate);
-err_memory_path_reenable:
- data = &clock->interconnect_data[IPA_INTERCONNECT_MEMORY];
- (void)icc_set_bw(clock->memory_path, data->average_rate,
- data->peak_rate);
+ count = clock->interconnect_count;
+ interconnect = clock->interconnect + count;
+ while (count--) {
+ interconnect--;
+ ret = icc_set_bw(interconnect->path, 0, 0);
+ if (ret && !result)
+ result = ret;
+ }
- return ret;
+ if (result)
+ dev_err(&ipa->pdev->dev,
+ "error %d disabling IPA interconnects\n", ret);
}
/* Turn on IPA clocks, including interconnects */
@@ -189,7 +201,7 @@ static int ipa_clock_enable(struct ipa *ipa)
static void ipa_clock_disable(struct ipa *ipa)
{
clk_disable_unprepare(ipa->clock->core);
- (void)ipa_interconnect_disable(ipa);
+ ipa_interconnect_disable(ipa);
}
/* Get an IPA clock reference, but only if the reference count is
@@ -269,7 +281,8 @@ ipa_clock_init(struct device *dev, const struct ipa_clock_data *data)
clk = clk_get(dev, "core");
if (IS_ERR(clk)) {
- dev_err(dev, "error %ld getting core clock\n", PTR_ERR(clk));
+ dev_err_probe(dev, PTR_ERR(clk), "error getting core clock\n");
+
return ERR_CAST(clk);
}
@@ -286,9 +299,9 @@ ipa_clock_init(struct device *dev, const struct ipa_clock_data *data)
goto err_clk_put;
}
clock->core = clk;
- clock->interconnect_data = data->interconnect;
+ clock->interconnect_count = data->interconnect_count;
- ret = ipa_interconnect_init(clock, dev);
+ ret = ipa_interconnect_init(clock, dev, data->interconnect_data);
if (ret)
goto err_kfree;
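Both the core clock and the interconnects are now reported with dev_err_probe() when they cannot be acquired. A minimal sketch of that pattern ("example" is a hypothetical clock name, not one the driver uses): dev_err_probe() stays quiet for -EPROBE_DEFER (the reason is recorded for the devices_deferred debugfs file), prints an error otherwise, and returns the error code so it can be propagated directly.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_clk_get(struct device *dev, struct clk **clk_out)
{
        struct clk *clk;

        clk = clk_get(dev, "example");          /* hypothetical clock name */
        if (IS_ERR(clk))
                /* Quiet on probe deferral, logged otherwise */
                return dev_err_probe(dev, PTR_ERR(clk),
                                     "error getting example clock\n");

        *clk_out = clk;

        return 0;
}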
diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c
index 002e51448510..35e35852c25c 100644
--- a/drivers/net/ipa/ipa_cmd.c
+++ b/drivers/net/ipa/ipa_cmd.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2020 Linaro Ltd.
+ * Copyright (C) 2019-2021 Linaro Ltd.
*/
#include <linux/types.h>
@@ -244,11 +244,15 @@ static bool ipa_cmd_register_write_offset_valid(struct ipa *ipa,
if (ipa->version != IPA_VERSION_3_5_1)
bit_count += hweight32(REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK);
BUILD_BUG_ON(bit_count > 32);
- offset_max = ~0 >> (32 - bit_count);
+ offset_max = ~0U >> (32 - bit_count);
+ /* Make sure the offset can be represented by the field(s)
+ * that holds it. Also make sure the offset is not outside
+ * the overall IPA memory range.
+ */
if (offset > offset_max || ipa->mem_offset > offset_max - offset) {
dev_err(dev, "%s offset too large 0x%04x + 0x%04x > 0x%04x)\n",
- ipa->mem_offset + offset, offset_max);
+ name, ipa->mem_offset, offset, offset_max);
return false;
}
@@ -261,12 +265,24 @@ static bool ipa_cmd_register_write_valid(struct ipa *ipa)
const char *name;
u32 offset;
- offset = ipa_reg_filt_rout_hash_flush_offset(ipa->version);
- name = "filter/route hash flush";
- if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
- return false;
+ /* If hashed tables are supported, ensure the hash flush register
+ * offset will fit in a register write IPA immediate command.
+ */
+ if (ipa_table_hash_support(ipa)) {
+ offset = ipa_reg_filt_rout_hash_flush_offset(ipa->version);
+ name = "filter/route hash flush";
+ if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
+ return false;
+ }
- offset = IPA_REG_ENDP_STATUS_N_OFFSET(IPA_ENDPOINT_COUNT);
+ /* Each endpoint can have a status endpoint associated with it,
+ * and this is recorded in an endpoint register. If the modem
+ * crashes, we reset the status endpoint for all modem endpoints
+ * using a register write IPA immediate command. Make sure the
+ * worst case (highest endpoint number) offset of that endpoint
+ * fits in the register write command field(s) that must hold it.
+ */
+ offset = IPA_REG_ENDP_STATUS_N_OFFSET(IPA_ENDPOINT_COUNT - 1);
name = "maximal endpoint status";
if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
return false;
@@ -529,7 +545,7 @@ void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset, u16 size,
direction, opcode);
}
-static void ipa_cmd_ip_tag_status_add(struct gsi_trans *trans, u64 tag)
+static void ipa_cmd_ip_tag_status_add(struct gsi_trans *trans)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_TAG_STATUS;
@@ -543,14 +559,14 @@ static void ipa_cmd_ip_tag_status_add(struct gsi_trans *trans, u64 tag)
cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
payload = &cmd_payload->ip_packet_tag_status;
- payload->tag = u64_encode_bits(tag, IP_PACKET_TAG_STATUS_TAG_FMASK);
+ payload->tag = le64_encode_bits(0, IP_PACKET_TAG_STATUS_TAG_FMASK);
gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
direction, opcode);
}
/* Issue a small command TX data transfer */
-static void ipa_cmd_transfer_add(struct gsi_trans *trans, u16 size)
+static void ipa_cmd_transfer_add(struct gsi_trans *trans)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
enum dma_data_direction direction = DMA_TO_DEVICE;
@@ -558,8 +574,6 @@ static void ipa_cmd_transfer_add(struct gsi_trans *trans, u16 size)
union ipa_cmd_payload *payload;
dma_addr_t payload_addr;
- /* assert(size <= sizeof(*payload)); */
-
/* Just transfer a zero-filled payload structure */
payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
@@ -567,34 +581,53 @@ static void ipa_cmd_transfer_add(struct gsi_trans *trans, u16 size)
direction, opcode);
}
-void ipa_cmd_tag_process_add(struct gsi_trans *trans)
+/* Add immediate commands to a transaction to clear the hardware pipeline */
+void ipa_cmd_pipeline_clear_add(struct gsi_trans *trans)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
struct ipa_endpoint *endpoint;
- endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];
+ /* This will complete when the transfer is received */
+ reinit_completion(&ipa->completion);
+ /* Issue a no-op register write command (mask 0 means no write) */
ipa_cmd_register_write_add(trans, 0, 0, 0, true);
+
+ /* Send a data packet through the IPA pipeline. The packet_init
+ * command says to send the next packet directly to the exception
+ * endpoint without any other IPA processing. The tag_status
+ * command requests that status be generated on completion of
+ * that transfer, and that it will be tagged with a value.
+ * Finally, the transfer command sends a small packet of data
+ * (instead of a command) using the command endpoint.
+ */
+ endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];
ipa_cmd_ip_packet_init_add(trans, endpoint->endpoint_id);
- ipa_cmd_ip_tag_status_add(trans, 0xcba987654321);
- ipa_cmd_transfer_add(trans, 4);
+ ipa_cmd_ip_tag_status_add(trans);
+ ipa_cmd_transfer_add(trans);
}
-/* Returns the number of commands required for the tag process */
-u32 ipa_cmd_tag_process_count(void)
+/* Returns the number of commands required to clear the pipeline */
+u32 ipa_cmd_pipeline_clear_count(void)
{
return 4;
}
-void ipa_cmd_tag_process(struct ipa *ipa)
+void ipa_cmd_pipeline_clear_wait(struct ipa *ipa)
+{
+ wait_for_completion(&ipa->completion);
+}
+
+void ipa_cmd_pipeline_clear(struct ipa *ipa)
{
- u32 count = ipa_cmd_tag_process_count();
+ u32 count = ipa_cmd_pipeline_clear_count();
struct gsi_trans *trans;
trans = ipa_cmd_trans_alloc(ipa, count);
if (trans) {
- ipa_cmd_tag_process_add(trans);
+ ipa_cmd_pipeline_clear_add(trans);
gsi_trans_commit_wait(trans);
+ ipa_cmd_pipeline_clear_wait(ipa);
} else {
dev_err(&ipa->pdev->dev,
"error allocating %u entry tag transaction\n", count);
diff --git a/drivers/net/ipa/ipa_cmd.h b/drivers/net/ipa/ipa_cmd.h
index 4ed09c486abc..6dd3d35cf315 100644
--- a/drivers/net/ipa/ipa_cmd.h
+++ b/drivers/net/ipa/ipa_cmd.h
@@ -157,26 +157,30 @@ void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset,
u16 size, dma_addr_t addr, bool toward_ipa);
/**
- * ipa_cmd_tag_process_add() - Add IPA tag process commands to a transaction
+ * ipa_cmd_pipeline_clear_add() - Add pipeline clear commands to a transaction
* @trans: GSI transaction
*/
-void ipa_cmd_tag_process_add(struct gsi_trans *trans);
+void ipa_cmd_pipeline_clear_add(struct gsi_trans *trans);
/**
- * ipa_cmd_tag_process_add_count() - Number of commands in a tag process
+ * ipa_cmd_pipeline_clear_count() - # commands required to clear pipeline
*
* Return: The number of elements to allocate in a transaction
- * to hold tag process commands
+ * to hold commands to clear the pipeline
*/
-u32 ipa_cmd_tag_process_count(void);
+u32 ipa_cmd_pipeline_clear_count(void);
/**
- * ipa_cmd_tag_process() - Perform a tag process
- *
- * @Return: The number of elements to allocate in a transaction
- * to hold tag process commands
+ * ipa_cmd_pipeline_clear_wait() - Wait for pipeline clear to complete
+ * @ipa: - IPA pointer
+ */
+void ipa_cmd_pipeline_clear_wait(struct ipa *ipa);
+
+/**
+ * ipa_cmd_pipeline_clear() - Clear the hardware pipeline
+ * @ipa: - IPA pointer
*/
-void ipa_cmd_tag_process(struct ipa *ipa);
+void ipa_cmd_pipeline_clear(struct ipa *ipa);
/**
* ipa_cmd_trans_alloc() - Allocate a transaction for the command TX endpoint
diff --git a/drivers/net/ipa/ipa_data-sc7180.c b/drivers/net/ipa/ipa_data-sc7180.c
index 5cc0ed77edb9..997b51ceb7d7 100644
--- a/drivers/net/ipa/ipa_data-sc7180.c
+++ b/drivers/net/ipa/ipa_data-sc7180.c
@@ -309,24 +309,30 @@ static struct ipa_mem_data ipa_mem_data = {
.smem_size = 0x00002000,
};
+/* Interconnect bandwidths are in 1000 byte/second units */
+static struct ipa_interconnect_data ipa_interconnect_data[] = {
+ {
+ .name = "memory",
+ .peak_bandwidth = 465000, /* 465 MBps */
+ .average_bandwidth = 80000, /* 80 MBps */
+ },
+ /* Average bandwidth is unused for the next two interconnects */
+ {
+ .name = "imem",
+ .peak_bandwidth = 68570, /* 68.570 MBps */
+ .average_bandwidth = 0, /* unused */
+ },
+ {
+ .name = "config",
+ .peak_bandwidth = 30000, /* 30 MBps */
+ .average_bandwidth = 0, /* unused */
+ },
+};
+
static struct ipa_clock_data ipa_clock_data = {
.core_clock_rate = 100 * 1000 * 1000, /* Hz */
- /* Interconnect rates are in 1000 byte/second units */
- .interconnect = {
- [IPA_INTERCONNECT_MEMORY] = {
- .peak_rate = 465000, /* 465 MBps */
- .average_rate = 80000, /* 80 MBps */
- },
- /* Average rate is unused for the next two interconnects */
- [IPA_INTERCONNECT_IMEM] = {
- .peak_rate = 68570, /* 68.570 MBps */
- .average_rate = 0, /* unused */
- },
- [IPA_INTERCONNECT_CONFIG] = {
- .peak_rate = 30000, /* 30 MBps */
- .average_rate = 0, /* unused */
- },
- },
+ .interconnect_count = ARRAY_SIZE(ipa_interconnect_data),
+ .interconnect_data = ipa_interconnect_data,
};
/* Configuration data for the SC7180 SoC. */
diff --git a/drivers/net/ipa/ipa_data-sdm845.c b/drivers/net/ipa/ipa_data-sdm845.c
index f8fee8d3ca42..88c9c3562ab7 100644
--- a/drivers/net/ipa/ipa_data-sdm845.c
+++ b/drivers/net/ipa/ipa_data-sdm845.c
@@ -329,24 +329,30 @@ static struct ipa_mem_data ipa_mem_data = {
.smem_size = 0x00002000,
};
+/* Interconnect bandwidths are in 1000 byte/second units */
+static struct ipa_interconnect_data ipa_interconnect_data[] = {
+ {
+ .name = "memory",
+ .peak_bandwidth = 600000, /* 600 MBps */
+ .average_bandwidth = 80000, /* 80 MBps */
+ },
+ /* Average bandwidth is unused for the next two interconnects */
+ {
+ .name = "imem",
+ .peak_bandwidth = 350000, /* 350 MBps */
+ .average_bandwidth = 0, /* unused */
+ },
+ {
+ .name = "config",
+ .peak_bandwidth = 40000, /* 40 MBps */
+ .average_bandwidth = 0, /* unused */
+ },
+};
+
static struct ipa_clock_data ipa_clock_data = {
.core_clock_rate = 75 * 1000 * 1000, /* Hz */
- /* Interconnect rates are in 1000 byte/second units */
- .interconnect = {
- [IPA_INTERCONNECT_MEMORY] = {
- .peak_rate = 600000, /* 600 MBps */
- .average_rate = 80000, /* 80 MBps */
- },
- /* Average rate is unused for the next two interconnects */
- [IPA_INTERCONNECT_IMEM] = {
- .peak_rate = 350000, /* 350 MBps */
- .average_rate = 0, /* unused */
- },
- [IPA_INTERCONNECT_CONFIG] = {
- .peak_rate = 40000, /* 40 MBps */
- .average_rate = 0, /* unused */
- },
- },
+ .interconnect_count = ARRAY_SIZE(ipa_interconnect_data),
+ .interconnect_data = ipa_interconnect_data,
};
/* Configuration data for the SDM845 SoC. */
diff --git a/drivers/net/ipa/ipa_data.h b/drivers/net/ipa/ipa_data.h
index 0ed5ffe2b8da..b476fc373f7f 100644
--- a/drivers/net/ipa/ipa_data.h
+++ b/drivers/net/ipa/ipa_data.h
@@ -258,32 +258,28 @@ struct ipa_mem_data {
u32 smem_size;
};
-/** enum ipa_interconnect_id - IPA interconnect identifier */
-enum ipa_interconnect_id {
- IPA_INTERCONNECT_MEMORY,
- IPA_INTERCONNECT_IMEM,
- IPA_INTERCONNECT_CONFIG,
- IPA_INTERCONNECT_COUNT, /* Last; not an interconnect */
-};
-
/**
- * struct ipa_interconnect_data - description of IPA interconnect rates
- * @peak_rate: Peak interconnect bandwidth (in 1000 byte/sec units)
- * @average_rate: Average interconnect bandwidth (in 1000 byte/sec units)
+ * struct ipa_interconnect_data - description of IPA interconnect bandwidths
+ * @name: Interconnect name (matches interconnect-name in DT)
+ * @peak_bandwidth: Peak interconnect bandwidth (in 1000 byte/sec units)
+ * @average_bandwidth: Average interconnect bandwidth (in 1000 byte/sec units)
*/
struct ipa_interconnect_data {
- u32 peak_rate;
- u32 average_rate;
+ const char *name;
+ u32 peak_bandwidth;
+ u32 average_bandwidth;
};
/**
* struct ipa_clock_data - description of IPA clock and interconnect rates
* @core_clock_rate: Core clock rate (Hz)
- * @interconnect: Array of interconnect bandwidth parameters
+ * @interconnect_count: Number of entries in the interconnect_data array
+ * @interconnect_data: IPA interconnect configuration data
*/
struct ipa_clock_data {
u32 core_clock_rate;
- struct ipa_interconnect_data interconnect[IPA_INTERCONNECT_COUNT];
+ u32 interconnect_count; /* # entries in interconnect_data[] */
+ const struct ipa_interconnect_data *interconnect_data;
};
/**
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index 9f4be9812a1f..7209ee3c3124 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -69,8 +69,11 @@ struct ipa_status {
};
/* Field masks for struct ipa_status structure fields */
+#define IPA_STATUS_MASK_TAG_VALID_FMASK GENMASK(4, 4)
+#define IPA_STATUS_SRC_IDX_FMASK GENMASK(4, 0)
#define IPA_STATUS_DST_IDX_FMASK GENMASK(4, 0)
#define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK GENMASK(31, 22)
+#define IPA_STATUS_FLAGS2_TAG_FMASK GENMASK_ULL(63, 16)
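A minimal sketch of how these masks are applied to the little-endian status fields with the bitfield helpers; the function is illustrative only and mirrors the tag check added further below:

#include <linux/bitfield.h>

static bool example_status_tag_valid(const struct ipa_status *status,
                                     u32 *src_endpoint_id)
{
        /* Bit 4 of the (little-endian) mask field flags a valid tag */
        if (!le16_get_bits(status->mask, IPA_STATUS_MASK_TAG_VALID_FMASK))
                return false;

        /* The low 5 bits of the source index byte hold the endpoint ID */
        *src_endpoint_id = u8_get_bits(status->endp_src_idx,
                                       IPA_STATUS_SRC_IDX_FMASK);

        return true;
}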
#ifdef IPA_VALIDATE
@@ -171,9 +174,6 @@ static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
enum ipa_endpoint_name name;
u32 limit;
- /* Not sure where this constraint come from... */
- BUILD_BUG_ON(sizeof(struct ipa_status) % 4);
-
if (count > IPA_ENDPOINT_COUNT) {
dev_err(dev, "too many endpoints specified (%u > %u)\n",
count, IPA_ENDPOINT_COUNT);
@@ -399,7 +399,7 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
* That won't happen, and we could be more precise, but this is fine
* for now. We need to end the transaction with a "tag process."
*/
- count = hweight32(initialized) + ipa_cmd_tag_process_count();
+ count = hweight32(initialized) + ipa_cmd_pipeline_clear_count();
trans = ipa_cmd_trans_alloc(ipa, count);
if (!trans) {
dev_err(&ipa->pdev->dev,
@@ -428,11 +428,13 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
}
- ipa_cmd_tag_process_add(trans);
+ ipa_cmd_pipeline_clear_add(trans);
/* XXX This should have a 1 second timeout */
gsi_trans_commit_wait(trans);
+ ipa_cmd_pipeline_clear_wait(ipa);
+
return 0;
}
@@ -588,7 +590,7 @@ static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
/* Note that HDR_ENDIANNESS indicates big endian header fields */
if (endpoint->data->qmap)
- val = cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);
+ val = (__force u32)cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);
iowrite32(val, endpoint->ipa->reg_virt + offset);
}
@@ -1015,31 +1017,34 @@ err_free_pages:
}
/**
- * ipa_endpoint_replenish() - Replenish the Rx packets cache.
+ * ipa_endpoint_replenish() - Replenish endpoint receive buffers
* @endpoint: Endpoint to be replenished
- * @count: Number of buffers to send to hardware
+ * @add_one: Whether this is replacing a just-consumed buffer
*
- * Allocate RX packet wrapper structures with maximal socket buffers
- * for an endpoint. These are supplied to the hardware, which fills
- * them with incoming data.
+ * The IPA hardware can hold a fixed number of receive buffers for an RX
+ * endpoint, based on the number of entries in the underlying channel ring
+ * buffer. If an endpoint's "backlog" is non-zero, it indicates how many
+ * more receive buffers can be supplied to the hardware. Replenishing for
+ * an endpoint can be disabled, in which case requests to replenish a
+ * buffer are "saved", and transferred to the backlog once it is re-enabled
+ * again.
*/
-static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, u32 count)
+static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, bool add_one)
{
struct gsi *gsi;
u32 backlog;
if (!endpoint->replenish_enabled) {
- if (count)
- atomic_add(count, &endpoint->replenish_saved);
+ if (add_one)
+ atomic_inc(&endpoint->replenish_saved);
return;
}
-
while (atomic_dec_not_zero(&endpoint->replenish_backlog))
if (ipa_endpoint_replenish_one(endpoint))
goto try_again_later;
- if (count)
- atomic_add(count, &endpoint->replenish_backlog);
+ if (add_one)
+ atomic_inc(&endpoint->replenish_backlog);
return;
@@ -1047,8 +1052,8 @@ try_again_later:
/* The last one didn't succeed, so fix the backlog */
backlog = atomic_inc_return(&endpoint->replenish_backlog);
- if (count)
- atomic_add(count, &endpoint->replenish_backlog);
+ if (add_one)
+ atomic_inc(&endpoint->replenish_backlog);
/* Whenever a receive buffer transaction completes we'll try to
* replenish again. It's unlikely, but if we fail to supply even
@@ -1075,7 +1080,7 @@ static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
/* Start replenishing if hardware currently has no buffers */
max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id);
if (atomic_read(&endpoint->replenish_backlog) == max_backlog)
- ipa_endpoint_replenish(endpoint, 0);
+ ipa_endpoint_replenish(endpoint, false);
}
static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
@@ -1094,7 +1099,7 @@ static void ipa_endpoint_replenish_work(struct work_struct *work)
endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);
- ipa_endpoint_replenish(endpoint, 0);
+ ipa_endpoint_replenish(endpoint, false);
}
static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
@@ -1164,19 +1169,53 @@ static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint,
return true;
if (!status->pkt_len)
return true;
- endpoint_id = u32_get_bits(status->endp_dst_idx,
- IPA_STATUS_DST_IDX_FMASK);
+ endpoint_id = u8_get_bits(status->endp_dst_idx,
+ IPA_STATUS_DST_IDX_FMASK);
if (endpoint_id != endpoint->endpoint_id)
return true;
return false; /* Don't skip this packet, process it */
}
+static bool ipa_endpoint_status_tag(struct ipa_endpoint *endpoint,
+ const struct ipa_status *status)
+{
+ struct ipa_endpoint *command_endpoint;
+ struct ipa *ipa = endpoint->ipa;
+ u32 endpoint_id;
+
+ if (!le16_get_bits(status->mask, IPA_STATUS_MASK_TAG_VALID_FMASK))
+ return false; /* No valid tag */
+
+ /* The status contains a valid tag. We know the packet was sent to
+ * this endpoint (already verified by ipa_endpoint_status_skip()).
+ * If the packet came from the AP->command TX endpoint we know
+ * this packet was sent as part of the pipeline clear process.
+ */
+ endpoint_id = u8_get_bits(status->endp_src_idx,
+ IPA_STATUS_SRC_IDX_FMASK);
+ command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
+ if (endpoint_id == command_endpoint->endpoint_id) {
+ complete(&ipa->completion);
+ } else {
+ dev_err(&ipa->pdev->dev,
+ "unexpected tagged packet from endpoint %u\n",
+ endpoint_id);
+ }
+
+ return true;
+}
+
/* Return whether the status indicates the packet should be dropped */
-static bool ipa_status_drop_packet(const struct ipa_status *status)
+static bool ipa_endpoint_status_drop(struct ipa_endpoint *endpoint,
+ const struct ipa_status *status)
{
u32 val;
+ /* If the status indicates a tagged transfer, we'll drop the packet */
+ if (ipa_endpoint_status_tag(endpoint, status))
+ return true;
+
/* Deaggregation exceptions we drop; all other types we consume */
if (status->exception)
return status->exception == IPA_STATUS_EXCEPTION_DEAGGR;
@@ -1213,12 +1252,11 @@ static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
continue;
}
- /* Compute the amount of buffer space consumed by the
- * packet, including the status element. If the hardware
- * is configured to pad packet data to an aligned boundary,
- * account for that. And if checksum offload is is enabled
- * a trailer containing computed checksum information will
- * be appended.
+ /* Compute the amount of buffer space consumed by the packet,
+ * including the status element. If the hardware is configured
+ * to pad packet data to an aligned boundary, account for that.
+ * And if checksum offload is enabled a trailer containing
+ * computed checksum information will be appended.
*/
align = endpoint->data->rx.pad_align ? : 1;
len = le16_to_cpu(status->pkt_len);
@@ -1226,16 +1264,21 @@ static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
if (endpoint->data->checksum)
len += sizeof(struct rmnet_map_dl_csum_trailer);
- /* Charge the new packet with a proportional fraction of
- * the unused space in the original receive buffer.
- * XXX Charge a proportion of the *whole* receive buffer?
- */
- if (!ipa_status_drop_packet(status)) {
- u32 extra = unused * len / total_len;
- void *data2 = data + sizeof(*status);
- u32 len2 = le16_to_cpu(status->pkt_len);
+ if (!ipa_endpoint_status_drop(endpoint, status)) {
+ void *data2;
+ u32 extra;
+ u32 len2;
/* Client receives only packet data (no status) */
+ data2 = data + sizeof(*status);
+ len2 = le16_to_cpu(status->pkt_len);
+
+ /* Have the true size reflect the extra unused space in
+ * the original receive buffer. Distribute the "cost"
+ * proportionately across all aggregated packets in the
+ * buffer.
+ */
+ extra = DIV_ROUND_CLOSEST(unused * len, total_len);
ipa_endpoint_skb_copy(endpoint, data2, len2, extra);
}
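
DIV_ROUND_CLOSEST() rounds the per-packet share of the buffer's unused space to the nearest integer instead of truncating it. With hypothetical numbers (not taken from the driver):

	u32 unused = 1000;	/* unused space left in the receive buffer */
	u32 total_len = 3000;	/* total packet data in the buffer */
	u32 len = 500;		/* space consumed by this packet */

	u32 old_extra = unused * len / total_len;			/* 166, truncated */
	u32 new_extra = DIV_ROUND_CLOSEST(unused * len, total_len);	/* 167, nearest */

so each aggregated packet's reported true size absorbs a slightly fairer share of the buffer's unused space.
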
@@ -1257,7 +1300,7 @@ static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint,
{
struct page *page;
- ipa_endpoint_replenish(endpoint, 1);
+ ipa_endpoint_replenish(endpoint, true);
if (trans->cancelled)
return;
@@ -1378,7 +1421,7 @@ static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
do {
if (!ipa_endpoint_aggr_active(endpoint))
break;
- msleep(1);
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
} while (retries--);
/* Check one last time */
@@ -1399,7 +1442,7 @@ static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
*/
gsi_channel_reset(gsi, endpoint->channel_id, true);
- msleep(1);
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
goto out_suspend_again;
@@ -1564,7 +1607,7 @@ void ipa_endpoint_suspend(struct ipa *ipa)
if (ipa->modem_netdev)
ipa_modem_suspend(ipa->modem_netdev);
- ipa_cmd_tag_process(ipa);
+ ipa_cmd_pipeline_clear(ipa);
ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
index 84bb8ae92725..97c1b55405cb 100644
--- a/drivers/net/ipa/ipa_main.c
+++ b/drivers/net/ipa/ipa_main.c
@@ -15,7 +15,6 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
-#include <linux/remoteproc.h>
#include <linux/qcom_scm.h>
#include <linux/soc/qcom/mdt_loader.h>
@@ -581,10 +580,10 @@ ipa_resource_config(struct ipa *ipa, const struct ipa_resource_data *data)
return -EINVAL;
for (i = 0; i < data->resource_src_count; i++)
- ipa_resource_config_src(ipa, data->resource_src);
+ ipa_resource_config_src(ipa, &data->resource_src[i]);
for (i = 0; i < data->resource_dst_count; i++)
- ipa_resource_config_dst(ipa, data->resource_dst);
+ ipa_resource_config_dst(ipa, &data->resource_dst[i]);
return 0;
}
@@ -729,19 +728,6 @@ static const struct of_device_id ipa_match[] = {
};
MODULE_DEVICE_TABLE(of, ipa_match);
-static phandle of_property_read_phandle(const struct device_node *np,
- const char *name)
-{
- struct property *prop;
- int len = 0;
-
- prop = of_find_property(np, name, &len);
- if (!prop || len != sizeof(__be32))
- return 0;
-
- return be32_to_cpup(prop->value);
-}
-
/* Check things that can be validated at build time. This just
* groups these things so BUILD_BUG_ON() calls don't clutter the rest
* of the code.
@@ -807,10 +793,8 @@ static int ipa_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
const struct ipa_data *data;
struct ipa_clock *clock;
- struct rproc *rproc;
bool modem_init;
struct ipa *ipa;
- phandle ph;
int ret;
ipa_validate_build();
@@ -829,25 +813,12 @@ static int ipa_probe(struct platform_device *pdev)
if (!qcom_scm_is_available())
return -EPROBE_DEFER;
- /* We rely on remoteproc to tell us about modem state changes */
- ph = of_property_read_phandle(dev->of_node, "modem-remoteproc");
- if (!ph) {
- dev_err(dev, "DT missing \"modem-remoteproc\" property\n");
- return -EINVAL;
- }
-
- rproc = rproc_get_by_phandle(ph);
- if (!rproc)
- return -EPROBE_DEFER;
-
/* The clock and interconnects might not be ready when we're
* probed, so might return -EPROBE_DEFER.
*/
clock = ipa_clock_init(dev, data->clock_data);
- if (IS_ERR(clock)) {
- ret = PTR_ERR(clock);
- goto err_rproc_put;
- }
+ if (IS_ERR(clock))
+ return PTR_ERR(clock);
/* No more EPROBE_DEFER. Allocate and initialize the IPA structure */
ipa = kzalloc(sizeof(*ipa), GFP_KERNEL);
@@ -858,9 +829,9 @@ static int ipa_probe(struct platform_device *pdev)
ipa->pdev = pdev;
dev_set_drvdata(dev, ipa);
- ipa->modem_rproc = rproc;
ipa->clock = clock;
ipa->version = data->version;
+ init_completion(&ipa->completion);
ret = ipa_reg_init(ipa);
if (ret)
@@ -935,8 +906,6 @@ err_kfree_ipa:
kfree(ipa);
err_clock_exit:
ipa_clock_exit(clock);
-err_rproc_put:
- rproc_put(rproc);
return ret;
}
@@ -944,7 +913,6 @@ err_rproc_put:
static int ipa_remove(struct platform_device *pdev)
{
struct ipa *ipa = dev_get_drvdata(&pdev->dev);
- struct rproc *rproc = ipa->modem_rproc;
struct ipa_clock *clock = ipa->clock;
int ret;
@@ -970,7 +938,6 @@ static int ipa_remove(struct platform_device *pdev)
ipa_reg_exit(ipa);
kfree(ipa);
ipa_clock_exit(clock);
- rproc_put(rproc);
return 0;
}
diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c
index 0cc3a3374caa..f25029b9ec85 100644
--- a/drivers/net/ipa/ipa_mem.c
+++ b/drivers/net/ipa/ipa_mem.c
@@ -336,7 +336,7 @@ static void ipa_imem_exit(struct ipa *ipa)
size = iommu_unmap(domain, ipa->imem_iova, ipa->imem_size);
if (size != ipa->imem_size)
- dev_warn(dev, "unmapped %zu IMEM bytes, expected %lu\n",
+ dev_warn(dev, "unmapped %zu IMEM bytes, expected %zu\n",
size, ipa->imem_size);
} else {
dev_err(dev, "couldn't get IPA IOMMU domain for IMEM\n");
@@ -440,7 +440,7 @@ static void ipa_smem_exit(struct ipa *ipa)
size = iommu_unmap(domain, ipa->smem_iova, ipa->smem_size);
if (size != ipa->smem_size)
- dev_warn(dev, "unmapped %zu SMEM bytes, expected %lu\n",
+ dev_warn(dev, "unmapped %zu SMEM bytes, expected %zu\n",
size, ipa->smem_size);
} else {
diff --git a/drivers/net/ipa/ipa_modem.c b/drivers/net/ipa/ipa_modem.c
index e34fe2d77324..9b08eb823984 100644
--- a/drivers/net/ipa/ipa_modem.c
+++ b/drivers/net/ipa/ipa_modem.c
@@ -216,6 +216,7 @@ int ipa_modem_start(struct ipa *ipa)
ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX]->netdev = netdev;
ipa->name_map[IPA_ENDPOINT_AP_MODEM_RX]->netdev = netdev;
+ SET_NETDEV_DEV(netdev, &ipa->pdev->dev);
priv = netdev_priv(netdev);
priv->ipa = ipa;
diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h
index e6b0827a244e..732e691e9aa6 100644
--- a/drivers/net/ipa/ipa_reg.h
+++ b/drivers/net/ipa/ipa_reg.h
@@ -408,15 +408,18 @@ enum ipa_cs_offload_en {
static inline u32 ipa_header_size_encoded(enum ipa_version version,
u32 header_size)
{
+ u32 size = header_size & field_mask(HDR_LEN_FMASK);
u32 val;
- val = u32_encode_bits(header_size, HDR_LEN_FMASK);
- if (version < IPA_VERSION_4_5)
+ val = u32_encode_bits(size, HDR_LEN_FMASK);
+ if (version < IPA_VERSION_4_5) {
+ /* ipa_assert(header_size == size); */
return val;
+ }
/* IPA v4.5 adds a few more most-significant bits */
- header_size >>= hweight32(HDR_LEN_FMASK);
- val |= u32_encode_bits(header_size, HDR_LEN_MSB_FMASK);
+ size = header_size >> hweight32(HDR_LEN_FMASK);
+ val |= u32_encode_bits(size, HDR_LEN_MSB_FMASK);
return val;
}
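
On IPA v4.5 the header length can exceed what HDR_LEN_FMASK alone can hold, so the low-order bits are written to that field and the remainder to HDR_LEN_MSB_FMASK. A self-contained sketch of that split, using made-up DEMO_* masks rather than the real register layout:

#include <linux/bitfield.h>
#include <linux/bitops.h>

/* Hypothetical field masks for illustration only */
#define DEMO_LEN_FMASK		GENMASK(5, 0)	/* 6 low-order bits */
#define DEMO_LEN_MSB_FMASK	GENMASK(17, 16)	/* 2 more bits (v4.5+) */

static u32 demo_len_encoded(bool is_v4_5, u32 len)
{
	/* Low-order bits always go in the original field */
	u32 val = u32_encode_bits(len & field_mask(DEMO_LEN_FMASK),
				  DEMO_LEN_FMASK);

	if (!is_v4_5)
		return val;	/* len is assumed to fit in DEMO_LEN_FMASK */

	/* Remaining high-order bits go in the MSB extension field */
	return val | u32_encode_bits(len >> hweight32(DEMO_LEN_FMASK),
				     DEMO_LEN_MSB_FMASK);
}
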
@@ -425,15 +428,18 @@ static inline u32 ipa_header_size_encoded(enum ipa_version version,
static inline u32 ipa_metadata_offset_encoded(enum ipa_version version,
u32 offset)
{
+ u32 off = offset & field_mask(HDR_OFST_METADATA_FMASK);
u32 val;
- val = u32_encode_bits(offset, HDR_OFST_METADATA_FMASK);
- if (version < IPA_VERSION_4_5)
+ val = u32_encode_bits(off, HDR_OFST_METADATA_FMASK);
+ if (version < IPA_VERSION_4_5) {
+ /* ipa_assert(offset == off); */
return val;
+ }
/* IPA v4.5 adds a few more most-significant bits */
- offset >>= hweight32(HDR_OFST_METADATA_FMASK);
- val |= u32_encode_bits(offset, HDR_OFST_METADATA_MSB_FMASK);
+ off = offset >> hweight32(HDR_OFST_METADATA_FMASK);
+ val |= u32_encode_bits(off, HDR_OFST_METADATA_MSB_FMASK);
return val;
}
diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
index 32e2d3e052d5..baaab3dd0e63 100644
--- a/drivers/net/ipa/ipa_table.c
+++ b/drivers/net/ipa/ipa_table.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2021 Linaro Ltd.
*/
#include <linux/types.h>
@@ -239,6 +239,11 @@ static void ipa_table_validate_build(void)
#endif /* !IPA_VALIDATE */
+bool ipa_table_hash_support(struct ipa *ipa)
+{
+ return ipa->version != IPA_VERSION_4_2;
+}
+
/* Zero entry count means no table, so just return a 0 address */
static dma_addr_t ipa_table_addr(struct ipa *ipa, bool filter_mask, u16 count)
{
@@ -412,8 +417,7 @@ int ipa_table_hash_flush(struct ipa *ipa)
struct gsi_trans *trans;
u32 val;
- /* IPA version 4.2 does not support hashed tables */
- if (ipa->version == IPA_VERSION_4_2)
+ if (!ipa_table_hash_support(ipa))
return 0;
trans = ipa_cmd_trans_alloc(ipa, 1);
@@ -531,8 +535,7 @@ static void ipa_filter_config(struct ipa *ipa, bool modem)
enum gsi_ee_id ee_id = modem ? GSI_EE_MODEM : GSI_EE_AP;
u32 ep_mask = ipa->filter_map;
- /* IPA version 4.2 has no hashed route tables */
- if (ipa->version == IPA_VERSION_4_2)
+ if (!ipa_table_hash_support(ipa))
return;
while (ep_mask) {
@@ -582,8 +585,7 @@ static void ipa_route_config(struct ipa *ipa, bool modem)
{
u32 route_id;
- /* IPA version 4.2 has no hashed route tables */
- if (ipa->version == IPA_VERSION_4_2)
+ if (!ipa_table_hash_support(ipa))
return;
for (route_id = 0; route_id < IPA_ROUTE_COUNT_MAX; route_id++)
diff --git a/drivers/net/ipa/ipa_table.h b/drivers/net/ipa/ipa_table.h
index 78038d14fcea..1a68d20f19d6 100644
--- a/drivers/net/ipa/ipa_table.h
+++ b/drivers/net/ipa/ipa_table.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2020 Linaro Ltd.
+ * Copyright (C) 2019-2021 Linaro Ltd.
*/
#ifndef _IPA_TABLE_H_
#define _IPA_TABLE_H_
@@ -52,6 +52,12 @@ static inline bool ipa_filter_map_valid(struct ipa *ipa, u32 filter_mask)
#endif /* !IPA_VALIDATE */
/**
+ * ipa_table_hash_support() - Return true if hashed tables are supported
+ * @ipa: IPA pointer
+ */
+bool ipa_table_hash_support(struct ipa *ipa);
+
+/**
* ipa_table_reset() - Reset filter and route tables entries to "none"
* @ipa: IPA pointer
* @modem: Whether to reset modem or AP entries