 Documentation/driver-model/devres.txt |  1
 crypto/async_tx/async_pq.c            | 10
 crypto/async_tx/raid6test.c           |  4
 drivers/dma/dmaengine.c               | 53
 drivers/dma/hsu/hsu.c                 |  8
 drivers/dma/idma64.c                  |  8
 drivers/dma/ioat/dma.c                |  6
 drivers/dma/mic_x100_dma.c            |  8
 drivers/dma/mv_xor_v2.c               | 16
 include/linux/dmaengine.h             |  6
 sound/soc/soc-generic-dmaengine-pcm.c |  2
 11 files changed, 94 insertions(+), 28 deletions(-)
diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt
index 7c1bb3d0c222..43681ca0837f 100644
--- a/Documentation/driver-model/devres.txt
+++ b/Documentation/driver-model/devres.txt
@@ -240,6 +240,7 @@ CLOCK
   devm_of_clk_add_hw_provider()
 
 DMA
+  dmaenginem_async_device_register()
   dmam_alloc_coherent()
   dmam_alloc_attrs()
   dmam_declare_coherent_memory()
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index 56bd612927ab..80dc567801ec 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -42,6 +42,8 @@ static struct page *pq_scribble_page;
 #define P(b, d) (b[d-2])
 #define Q(b, d) (b[d-1])
 
+#define MAX_DISKS 255
+
 /**
  * do_async_gen_syndrome - asynchronously calculate P and/or Q
  */
@@ -184,7 +186,7 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 	struct dma_device *device = chan ? chan->device : NULL;
 	struct dmaengine_unmap_data *unmap = NULL;
 
-	BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks)));
+	BUG_ON(disks > MAX_DISKS || !(P(blocks, disks) || Q(blocks, disks)));
 
 	if (device)
 		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);
@@ -196,7 +198,7 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 	    is_dma_pq_aligned(device, offset, 0, len)) {
 		struct dma_async_tx_descriptor *tx;
 		enum dma_ctrl_flags dma_flags = 0;
-		unsigned char coefs[src_cnt];
+		unsigned char coefs[MAX_DISKS];
 		int i, j;
 
 		/* run the p+q asynchronously */
@@ -299,11 +301,11 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 	struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);
 	struct dma_device *device = chan ? chan->device : NULL;
 	struct dma_async_tx_descriptor *tx;
-	unsigned char coefs[disks-2];
+	unsigned char coefs[MAX_DISKS];
 	enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
 	struct dmaengine_unmap_data *unmap = NULL;
 
-	BUG_ON(disks < 4);
+	BUG_ON(disks < 4 || disks > MAX_DISKS);
 
 	if (device)
 		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);
diff --git a/crypto/async_tx/raid6test.c b/crypto/async_tx/raid6test.c
index dad95f45b88f..a5edaabae12a 100644
--- a/crypto/async_tx/raid6test.c
+++ b/crypto/async_tx/raid6test.c
@@ -81,11 +81,13 @@ static void raid6_dual_recov(int disks, size_t bytes, int faila, int failb, stru
 		init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
 		tx = async_gen_syndrome(ptrs, 0, disks, bytes, &submit);
 	} else {
-		struct page *blocks[disks];
+		struct page *blocks[NDISKS];
 		struct page *dest;
 		int count = 0;
 		int i;
 
+		BUG_ON(disks > NDISKS);
+
 		/* data+Q failure. Reconstruct data from P,
 		 * then rebuild syndrome
 		 */
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 08ba8473a284..272bed6c8ba7 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -500,12 +500,8 @@ int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
 	caps->max_burst = device->max_burst;
 	caps->residue_granularity = device->residue_granularity;
 	caps->descriptor_reuse = device->descriptor_reuse;
-
-	/*
-	 * Some devices implement only pause (e.g. to get residuum) but no
-	 * resume. However cmd_pause is advertised as pause AND resume.
-	 */
-	caps->cmd_pause = !!(device->device_pause && device->device_resume);
+	caps->cmd_pause = !!device->device_pause;
+	caps->cmd_resume = !!device->device_resume;
 	caps->cmd_terminate = !!device->device_terminate_all;
 
 	return 0;
@@ -774,8 +770,14 @@ struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
 		return ERR_PTR(-ENODEV);
 
 	chan = __dma_request_channel(mask, NULL, NULL);
-	if (!chan)
-		chan = ERR_PTR(-ENODEV);
+	if (!chan) {
+		mutex_lock(&dma_list_mutex);
+		if (list_empty(&dma_device_list))
+			chan = ERR_PTR(-EPROBE_DEFER);
+		else
+			chan = ERR_PTR(-ENODEV);
+		mutex_unlock(&dma_list_mutex);
+	}
 
 	return chan;
 }
@@ -1139,6 +1141,41 @@ void dma_async_device_unregister(struct dma_device *device)
 }
 EXPORT_SYMBOL(dma_async_device_unregister);
 
+static void dmam_device_release(struct device *dev, void *res)
+{
+	struct dma_device *device;
+
+	device = *(struct dma_device **)res;
+	dma_async_device_unregister(device);
+}
+
+/**
+ * dmaenginem_async_device_register - registers DMA devices found
+ * @device: &dma_device
+ *
+ * The operation is managed and will be undone on driver detach.
+ */
+int dmaenginem_async_device_register(struct dma_device *device)
+{
+	void *p;
+	int ret;
+
+	p = devres_alloc(dmam_device_release, sizeof(void *), GFP_KERNEL);
+	if (!p)
+		return -ENOMEM;
+
+	ret = dma_async_device_register(device);
+	if (!ret) {
+		*(struct dma_device **)p = device;
+		devres_add(device->dev, p);
+	} else {
+		devres_free(p);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(dmaenginem_async_device_register);
+
 struct dmaengine_unmap_pool {
 	struct kmem_cache *cache;
 	const char *name;
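To illustrate how the managed helper above is meant to be consumed, a minimal probe sketch follows; the "foo" driver, its private structure, and the callback wiring are hypothetical placeholders, not code from this series:

#include <linux/dmaengine.h>
#include <linux/platform_device.h>

struct foo_dma {
	struct dma_device dma;	/* dma.dev must be set before registering */
};

static int foo_dma_probe(struct platform_device *pdev)
{
	struct foo_dma *fd;

	fd = devm_kzalloc(&pdev->dev, sizeof(*fd), GFP_KERNEL);
	if (!fd)
		return -ENOMEM;

	fd->dma.dev = &pdev->dev;
	dma_cap_set(DMA_MEMCPY, fd->dma.cap_mask);
	/* ... set up channels and the device_prep_*() callbacks ... */

	/* devres undoes this on driver detach, so the remove() path no
	 * longer needs an explicit dma_async_device_unregister() */
	return dmaenginem_async_device_register(&fd->dma);
}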
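Both conversions above simply route device_synchronize to the virt-dma helper. What this buys DMA clients is the non-blocking terminate flow of the dmaengine API; a client-side sketch, assuming chan is an already-requested channel with callbacks possibly in flight:

/* Stop the channel; this may return before running callbacks finish. */
dmaengine_terminate_async(chan);

/* Block until no callback still references client memory.  May sleep,
 * so it must not be called from atomic context. */
dmaengine_synchronize(chan);

/* Only now is it safe to free the buffers handed to the channel. */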
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 8b5b23a8ace9..23fb2fa04000 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -688,6 +688,12 @@ static void
 ioat_restart_channel(struct ioatdma_chan *ioat_chan)
 {
 	u64 phys_complete;
+	/* set the completion address register again */
+	writel(lower_32_bits(ioat_chan->completion_dma),
+	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
+	writel(upper_32_bits(ioat_chan->completion_dma),
+	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
+
 	ioat_quiesce(ioat_chan, 0);
 	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
 		__cleanup(ioat_chan, phys_complete);
diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c
index 68dd79783b54..b76cb17d879c 100644
--- a/drivers/dma/mic_x100_dma.c
+++ b/drivers/dma/mic_x100_dma.c
@@ -470,11 +470,6 @@ static void mic_dma_chan_destroy(struct mic_dma_chan *ch)
 	mic_dma_chan_mask_intr(ch);
 }
 
-static void mic_dma_unregister_dma_device(struct mic_dma_device *mic_dma_dev)
-{
-	dma_async_device_unregister(&mic_dma_dev->dma_dev);
-}
-
 static int mic_dma_setup_irq(struct mic_dma_chan *ch)
 {
 	ch->cookie =
@@ -630,7 +625,7 @@ static int mic_dma_register_dma_device(struct mic_dma_device *mic_dma_dev,
 		list_add_tail(&mic_dma_dev->mic_ch[i].api_ch.device_node,
 			      &mic_dma_dev->dma_dev.channels);
 	}
-	return dma_async_device_register(&mic_dma_dev->dma_dev);
+	return dmaenginem_async_device_register(&mic_dma_dev->dma_dev);
 }
 
 /*
@@ -678,7 +673,6 @@ alloc_error:
 
 static void mic_dma_dev_unreg(struct mic_dma_device *mic_dma_dev)
 {
-	mic_dma_unregister_dma_device(mic_dma_dev);
 	mic_dma_uninit(mic_dma_dev);
 	kfree(mic_dma_dev);
 }
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index c6589ccf1b9a..8dc0aa4d73ab 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -174,6 +174,7 @@ struct mv_xor_v2_device {
 	int desc_size;
 	unsigned int npendings;
 	unsigned int hw_queue_idx;
+	struct msi_desc *msi_desc;
 };
 
 /**
@@ -588,11 +589,9 @@ static void mv_xor_v2_tasklet(unsigned long data)
 		 */
 		dma_cookie_complete(&next_pending_sw_desc->async_tx);
 
-		if (next_pending_sw_desc->async_tx.callback)
-			next_pending_sw_desc->async_tx.callback(
-				next_pending_sw_desc->async_tx.callback_param);
-
 		dma_descriptor_unmap(&next_pending_sw_desc->async_tx);
+		dmaengine_desc_get_callback_invoke(
+				&next_pending_sw_desc->async_tx, NULL);
 		}
 
 		dma_run_dependencies(&next_pending_sw_desc->async_tx);
@@ -643,9 +642,9 @@ static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev)
 	       xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_SIZE_OFF);
 
 	/* write the DESQ address to the DMA enngine*/
-	writel(xor_dev->hw_desq & 0xFFFFFFFF,
+	writel(lower_32_bits(xor_dev->hw_desq),
 	       xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BALR_OFF);
-	writel((xor_dev->hw_desq & 0xFFFF00000000) >> 32,
+	writel(upper_32_bits(xor_dev->hw_desq),
 	       xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BAHR_OFF);
 
 	/*
@@ -780,6 +779,7 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
 	msi_desc = first_msi_entry(&pdev->dev);
 	if (!msi_desc)
 		goto free_msi_irqs;
+	xor_dev->msi_desc = msi_desc;
 
 	ret = devm_request_irq(&pdev->dev, msi_desc->irq,
 			       mv_xor_v2_interrupt_handler, 0,
@@ -897,8 +897,12 @@ static int mv_xor_v2_remove(struct platform_device *pdev)
 			  xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
 			  xor_dev->hw_desq_virt, xor_dev->hw_desq);
 
+	devm_free_irq(&pdev->dev, xor_dev->msi_desc->irq, xor_dev);
+
 	platform_msi_domain_free_irqs(&pdev->dev);
 
+	tasklet_kill(&xor_dev->irq_tasklet);
+
 	clk_disable_unprepare(xor_dev->clk);
 
 	return 0;
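The tasklet change above switches mv_xor_v2 to the shared callback helper. Roughly, dmaengine_desc_get_callback_invoke() from the private drivers/dma/dmaengine.h header performs the following two steps (a sketch; it also dispatches the newer callback_result variant when one is set):

struct dmaengine_desc_callback cb;

/* snapshot callback/callback_param from the descriptor ... */
dmaengine_desc_get_callback(&next_pending_sw_desc->async_tx, &cb);
/* ... and invoke it; a NULL result means no transfer status is passed */
dmaengine_desc_callback_invoke(&cb, NULL);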
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 861be5cab1df..d49ec5c31944 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -415,7 +415,9 @@ enum dma_residue_granularity {
  *	each type, the dma controller should set BIT(<TYPE>) and same
  *	should be checked by controller as well
  * @max_burst: max burst capability per-transfer
- * @cmd_pause: true, if pause and thereby resume is supported
+ * @cmd_pause: true, if pause is supported (i.e. for reading residue or
+ *	       for resume later)
+ * @cmd_resume: true, if resume is supported
  * @cmd_terminate: true, if terminate cmd is supported
  * @residue_granularity: granularity of the reported transfer residue
  * @descriptor_reuse: if a descriptor can be reused by client and
@@ -427,6 +429,7 @@ struct dma_slave_caps {
 	u32 directions;
 	u32 max_burst;
 	bool cmd_pause;
+	bool cmd_resume;
 	bool cmd_terminate;
 	enum dma_residue_granularity residue_granularity;
 	bool descriptor_reuse;
@@ -1403,6 +1406,7 @@ static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc)
 /* --- DMA device --- */
 
 int dma_async_device_register(struct dma_device *device);
+int dmaenginem_async_device_register(struct dma_device *device);
 void dma_async_device_unregister(struct dma_device *device);
 void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
 struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c
index 56a541b9ff9e..76c46d793843 100644
--- a/sound/soc/soc-generic-dmaengine-pcm.c
+++ b/sound/soc/soc-generic-dmaengine-pcm.c
@@ -156,7 +156,7 @@ static int dmaengine_pcm_set_runtime_hwparams(struct snd_pcm_substream *substrea
 	ret = dma_get_slave_caps(chan, &dma_caps);
 	if (ret == 0) {
-		if (dma_caps.cmd_pause)
+		if (dma_caps.cmd_pause && dma_caps.cmd_resume)
 			hw.info |= SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME;
 		if (dma_caps.residue_granularity <= DMA_RESIDUE_GRANULARITY_SEGMENT)
 			hw.info |= SNDRV_PCM_INFO_BATCH;
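The header and ALSA hunks above work as a pair: PAUSE/RESUME is only advertised when both callbacks exist, while pause-only controllers remain usable for residue reading. A generic consumer-side check might look like this (sketch; the helper name is hypothetical):

static bool foo_can_pause_and_resume(struct dma_chan *chan)
{
	struct dma_slave_caps caps;

	if (dma_get_slave_caps(chan, &caps) < 0)
		return false;

	/* cmd_pause alone only guarantees pausing, e.g. to read an
	 * exact residue; user-visible pause needs cmd_resume too */
	return caps.cmd_pause && caps.cmd_resume;
}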