author    G, Manjunath Kondaiah <manjugk@ti.com>    2010-11-02 11:59:00 +0530
committer G, Manjunath Kondaiah <manjugk@ti.com>    2010-11-02 11:59:34 +0530
commit    17fe20e68624675e937b932c1861cd060f9f52cb (patch)
tree      133bb3162a0202147225b54efb237d353ff6ccd5
parent    29e973b87c89e22a8ee95ee38afb0039a395d50d (diff)
parent    b4bba02195eb63861da3387d86c51a01e22f49a4 (diff)
Merge 'syslink changes' into L24.11ti-2.6.35-omap4-L24.11-p2
-rw-r--r--  arch/arm/mach-omap2/ipu_dev.c                    |   9
-rw-r--r--  arch/arm/mach-omap2/ipu_drv.c                    |  61
-rw-r--r--  arch/arm/mach-omap2/remoteproc44xx.c             |   9
-rw-r--r--  arch/arm/plat-omap/dmm_user.c                    | 187
-rw-r--r--  arch/arm/plat-omap/include/plat/dmm_user.h       |  37
-rw-r--r--  arch/arm/plat-omap/include/plat/ipu_dev.h        |   2
-rw-r--r--  arch/arm/plat-omap/include/syslink/notifydefs.h  |   2
-rw-r--r--  arch/arm/plat-omap/iodmm.c                       | 657
-rw-r--r--  arch/arm/plat-omap/iommu.c                       |  14
-rw-r--r--  arch/arm/plat-omap/remoteproc.c                  |   8
-rw-r--r--  drivers/dsp/syslink/Kconfig                      |   6
-rw-r--r--  drivers/dsp/syslink/devh/44xx/devh44xx.c         |  12
-rw-r--r--  drivers/dsp/syslink/ipu_pm/ipu_pm.c              | 181
-rw-r--r--  drivers/dsp/syslink/ipu_pm/ipu_pm.h              |  18
-rw-r--r--  drivers/dsp/syslink/omap_notify/notify.c         | 147
15 files changed, 728 insertions(+), 622 deletions(-)
diff --git a/arch/arm/mach-omap2/ipu_dev.c b/arch/arm/mach-omap2/ipu_dev.c
index e10f25f36afb..8d3386f1f86c 100644
--- a/arch/arm/mach-omap2/ipu_dev.c
+++ b/arch/arm/mach-omap2/ipu_dev.c
@@ -201,8 +201,6 @@ inline int ipu_pm_module_set_bandwidth(unsigned rsrc,
}
EXPORT_SYMBOL(ipu_pm_module_set_bandwidth);
-static struct omap_device *od_iva;
-
/* FIXME: not in use now
* static struct omap_ipupm_mod_ops omap_ipu_ops = {
* .start = NULL,
@@ -330,6 +328,7 @@ static int __init omap_ipussdev_init(void)
{
int status = -ENODEV;
int i;
+ int first = 1;
struct omap_hwmod *oh;
struct omap_device *od;
char *oh_name;
@@ -368,7 +367,11 @@ static int __init omap_ipussdev_init(void)
WARN(status, "Could not build omap_device for %s %s\n",
pdev_name, oh_name);
if (!status) {
- od_iva = od;
+ /* Save the id of the first registered dev */
+ if (first) {
+ ipu_pm_first_dev = od->pdev.id;
+ first = 0;
+ }
omap_ipupm_data[i].pdev = &od->pdev;
omap_ipupm_data[i].dev = &od->pdev.dev;
}
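Review note: the hunk above replaces the cached od_iva pointer with ipu_pm_first_dev, the platform-device id of the first omap_device built in the loop; the PM callbacks added to ipu_drv.c below compare pdev->id against it so the shared suspend/resume work runs exactly once per device set. A minimal sketch of the pattern, with demo_* names as hypothetical stand-ins:

    #include <linux/platform_device.h>

    static int demo_first_dev;

    /* remember the id of the first successfully registered device so a
     * callback shared by every device in the set acts only once */
    static void demo_record_first(struct platform_device *pdev, int *first)
    {
            if (*first) {
                    demo_first_dev = pdev->id;
                    *first = 0;
            }
    }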
diff --git a/arch/arm/mach-omap2/ipu_drv.c b/arch/arm/mach-omap2/ipu_drv.c
index 85796c0a140c..88a78ba7be29 100644
--- a/arch/arm/mach-omap2/ipu_drv.c
+++ b/arch/arm/mach-omap2/ipu_drv.c
@@ -47,6 +47,7 @@
static struct class *omap_ipu_pm_class;
static dev_t omap_ipu_pm_dev;
+int ipu_pm_first_dev;
static struct proc_dir_entry *ipu_pm_proc_entry;
/* we could iterate over something much more
@@ -189,12 +190,72 @@ static int __devinit ipu_pm_probe(struct platform_device *pdev)
return 0;
}
+static int ipu_pm_drv_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ int retval = 0;
+
+ if (pdev->id == ipu_pm_first_dev) {
+ pr_debug("%s.%d ASKED TO SUSPEND", pdev->name, pdev->id);
+ /* save any local context,
+ * BIOS timers could be saved locally or on Ducati
+ */
+
+ /* call our notification function */
+ retval = ipu_pm_notifications(PM_SUSPEND, NULL);
+
+ /* FIXME: Currently sending SUSPEND is enough to put
+ * Ducati into hibernation; save_ctx can be called at this
+ * point to save context and reset the remote procs.
+ * The save-ctx path currently accepts whichever proc_id;
+ * this may change when Tesla support is added.
+ */
+ /* sysm3 is handling hibernation of ducati currently */
+ ipu_pm_save_ctx(SYS_M3);
+
+ /* return result: zero if all Ducati clients returned
+ * zero, otherwise a failure code
+ */
+ }
+
+ return retval;
+}
+
+static int ipu_pm_drv_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ int retval = 0;
+
+ if (pdev->id == ipu_pm_first_dev) {
+ pr_debug("%s.%d ASKED TO RESUME", pdev->name, pdev->id);
+ /* restore any local context,
+ * BIOS timers could be restored locally or on Ducati
+ */
+
+ /* call our notification function */
+ retval = ipu_pm_notifications(PM_RESUME, NULL);
+
+ /* return result: zero if all Ducati clients returned
+ * zero, otherwise a failure code
+ */
+ }
+
+ return retval;
+}
+
+static const struct dev_pm_ops ipu_pm_ops = {
+ .suspend = ipu_pm_drv_suspend,
+ .resume = ipu_pm_drv_resume,
+};
+
static struct platform_driver ipu_pm_driver = {
.probe = ipu_pm_probe,
/*.remove = ipu_pm_remove, */
.driver = {
.name = IPU_DRIVER_NAME,
.owner = THIS_MODULE,
+ .pm = &ipu_pm_ops,
},
};
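Review note: with .driver.pm pointing at a dev_pm_ops table, the PM core invokes these callbacks during system suspend and resume; this is the standard way to give a platform driver sleep hooks on 2.6.35. A self-contained sketch of the same wiring, with demo_* names as hypothetical stand-ins:

    #include <linux/module.h>
    #include <linux/platform_device.h>

    static int demo_probe(struct platform_device *pdev)
    {
            return 0;       /* nothing to set up in this sketch */
    }

    static int demo_suspend(struct device *dev)
    {
            /* quiesce clients and save context before the system sleeps */
            return 0;
    }

    static int demo_resume(struct device *dev)
    {
            /* restore context and notify clients on wakeup */
            return 0;
    }

    static const struct dev_pm_ops demo_pm_ops = {
            .suspend = demo_suspend,
            .resume  = demo_resume,
    };

    static struct platform_driver demo_driver = {
            .probe  = demo_probe,
            .driver = {
                    .name  = "demo",
                    .owner = THIS_MODULE,
                    .pm    = &demo_pm_ops,  /* PM core calls these on sleep */
            },
    };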
diff --git a/arch/arm/mach-omap2/remoteproc44xx.c b/arch/arm/mach-omap2/remoteproc44xx.c
index 4189456d018d..d25becba39f5 100644
--- a/arch/arm/mach-omap2/remoteproc44xx.c
+++ b/arch/arm/mach-omap2/remoteproc44xx.c
@@ -170,13 +170,22 @@ static struct omap_rproc_platform_data omap4_rproc_data[] = {
.name = "ducati-proc0",
.ops = &omap4_ducati0_ops,
.oh_name = "ipu_c0",
+#ifdef CONFIG_SYSLINK_DUCATI_PM
+ .timer_id = 3,
+#else
.timer_id = -1,
+#endif
},
{
.name = "ducati-proc1",
.ops = &omap4_ducati1_ops,
.oh_name = "ipu_c1",
+#ifdef CONFIG_SYSLINK_DUCATI_PM
+ .timer_id = 4,
+#else
.timer_id = -1,
+#endif
+
},
};
diff --git a/arch/arm/plat-omap/dmm_user.c b/arch/arm/plat-omap/dmm_user.c
index 3afa28db3d2d..6137ebf22354 100644
--- a/arch/arm/plat-omap/dmm_user.c
+++ b/arch/arm/plat-omap/dmm_user.c
@@ -30,12 +30,10 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
-#include <linux/eventfd.h>
#include <plat/iommu.h>
#include <plat/iovmm.h>
#include <plat/dmm_user.h>
-#include "iopgtable.h"
#define OMAP_DMM_NAME "iovmm-omap"
@@ -59,161 +57,46 @@ static int omap_dmm_ioctl(struct inode *inode, struct file *filp,
switch (cmd) {
case DMM_IOCSETTLBENT:
- {
- struct iotlb_entry e;
- int size;
- size = copy_from_user(&e, (void __user *)args,
- sizeof(struct iotlb_entry));
- if (size) {
- ret = -EINVAL;
- goto err_user_buf;
- }
- load_iotlb_entry(obj->iovmm->iommu, &e);
+ /* FIXME: re-visit this check to perform
+ proper permission checks */
+ /* if (!capable(CAP_SYS_ADMIN))
+ return -EPERM; */
+ ret = program_tlb_entry(obj, (const void __user *)args);
break;
- }
case DMM_IOCCREATEPOOL:
- {
- struct iovmm_pool_info pool_info;
- int size;
-
- size = copy_from_user(&pool_info, (void __user *)args,
- sizeof(struct iovmm_pool_info));
- if (size) {
- ret = -EINVAL;
- goto err_user_buf;
- }
- omap_create_dmm_pool(obj, pool_info.pool_id, pool_info.size,
- pool_info.da_begin);
+ /* FIXME: re-visit this check to perform
+ proper permission checks */
+ /* if (!capable(CAP_SYS_ADMIN))
+ return -EPERM; */
+ ret = omap_create_dmm_pool(obj, (const void __user *)args);
break;
- }
- case DMM_IOCDELETEPOOL:
- {
- int pool_id;
- int size;
-
- size = copy_from_user(&pool_id, (void __user *)args,
- sizeof(int));
- if (size) {
- ret = -EINVAL;
- goto err_user_buf;
- }
- ret = omap_delete_dmm_pool(obj, pool_id);
- break;
- }
case DMM_IOCMEMMAP:
- {
- struct dmm_map_info map_info;
- int size;
- int status;
-
- size = copy_from_user(&map_info, (void __user *)args,
- sizeof(struct dmm_map_info));
-
- status = dmm_user(obj, map_info.mem_pool_id,
- map_info.da, map_info.mpu_addr,
- map_info.size, map_info.flags);
- ret = copy_to_user((void __user *)args, &map_info,
- sizeof(struct dmm_map_info));
+ ret = dmm_user(obj, (void __user *)args);
break;
- }
case DMM_IOCMEMUNMAP:
- {
- u32 da;
- int size;
- int status = 0;
-
- size = copy_from_user(&da, (void __user *)args, sizeof(u32));
- if (size) {
- ret = -EINVAL;
- goto err_user_buf;
- }
- status = user_un_map(obj, da);
- ret = status;
+ ret = user_un_map(obj, (const void __user *)args);
break;
- }
case IOMMU_IOCEVENTREG:
- {
- int fd;
- int size;
- struct iommu_event_ntfy *fd_reg;
-
- size = copy_from_user(&fd, (void __user *)args, sizeof(int));
- if (size) {
- ret = -EINVAL;
- goto err_user_buf;
- }
-
- fd_reg = kzalloc(sizeof(struct iommu_event_ntfy), GFP_KERNEL);
- fd_reg->fd = fd;
- fd_reg->evt_ctx = eventfd_ctx_fdget(fd);
- INIT_LIST_HEAD(&fd_reg->list);
- spin_lock_irq(&obj->iovmm->iommu->event_lock);
- list_add_tail(&fd_reg->list, &obj->iovmm->iommu->event_list);
- spin_unlock_irq(&obj->iovmm->iommu->event_lock);
+ ret = register_mmufault(obj, (const void __user *)args);
break;
- }
case IOMMU_IOCEVENTUNREG:
- {
- int fd;
- int size;
- struct iommu_event_ntfy *fd_reg, *temp_reg;
-
- size = copy_from_user(&fd, (void __user *)args, sizeof(int));
- if (size) {
- ret = -EINVAL;
- goto err_user_buf;
- }
- /* Free DMM mapped memory resources */
- spin_lock_irq(&obj->iovmm->iommu->event_lock);
- list_for_each_entry_safe(fd_reg, temp_reg,
- &obj->iovmm->iommu->event_list, list) {
- if (fd_reg->fd == fd) {
- list_del(&fd_reg->list);
- kfree(fd_reg);
- }
- }
- spin_unlock_irq(&obj->iovmm->iommu->event_lock);
+ ret = unregister_mmufault(obj, (const void __user *)args);
break;
- }
case DMM_IOCMEMFLUSH:
- {
- int size;
- int status;
- struct dmm_dma_info dma_info;
- size = copy_from_user(&dma_info, (void __user *)args,
- sizeof(struct dmm_dma_info));
- if (size) {
- ret = -EINVAL;
- goto err_user_buf;
- }
- status = proc_begin_dma(obj, dma_info.pva, dma_info.ul_size,
- dma_info.dir);
- ret = status;
+ ret = proc_begin_dma(obj, (void __user *)args);
break;
- }
case DMM_IOCMEMINV:
- {
- int size;
- int status;
- struct dmm_dma_info dma_info;
- size = copy_from_user(&dma_info, (void __user *)args,
- sizeof(struct dmm_dma_info));
- if (size) {
- ret = -EINVAL;
- goto err_user_buf;
- }
- status = proc_end_dma(obj, dma_info.pva, dma_info.ul_size,
- dma_info.dir);
- ret = status;
+ ret = proc_end_dma(obj, (void __user *)args);
+ break;
+ /* This ioctl can be deprecated */
+ case DMM_IOCDELETEPOOL:
break;
- }
case DMM_IOCDATOPA:
default:
return -ENOTTY;
}
-err_user_buf:
- return ret;
+ return ret;
}
static int omap_dmm_open(struct inode *inode, struct file *filp)
@@ -222,6 +105,7 @@ static int omap_dmm_open(struct inode *inode, struct file *filp)
struct iovmm_device *obj;
obj = container_of(inode->i_cdev, struct iovmm_device, cdev);
+ obj->refcount++;
iodmm = kzalloc(sizeof(struct iodmm_struct), GFP_KERNEL);
INIT_LIST_HEAD(&iodmm->map_list);
@@ -240,25 +124,41 @@ static int omap_dmm_release(struct inode *inode, struct file *filp)
if (!filp->private_data) {
status = -EIO;
- goto err;
+ goto err_out;
}
obj = filp->private_data;
+
flush_signals(current);
status = mutex_lock_interruptible(&obj->iovmm->dmm_map_lock);
if (status == 0) {
- iommu_notify_event(obj->iovmm->iommu, IOMMU_CLOSE, NULL);
+ /*
+ * Notify the remote processor that these resources are
+ * being cleaned up, before actually cleaning them, to
+ * avoid MMU-fault-like behavior
+ */
+ if (!list_empty(&obj->map_list)) {
+ iommu_notify_event(obj->iovmm->iommu, IOMMU_CLOSE,
+ NULL);
+ }
mutex_unlock(&obj->iovmm->dmm_map_lock);
} else {
pr_err("%s mutex_lock_interruptible returned 0x%x\n",
__func__, status);
}
+
user_remove_resources(obj);
iommu_put(obj->iovmm->iommu);
+
+ /* Delete all the DMM pools after the reference count goes to zero */
+ if (--obj->iovmm->refcount == 0)
+ omap_delete_dmm_pools(obj);
+
kfree(obj);
+
filp->private_data = NULL;
-err:
+err_out:
return status;
}
@@ -316,6 +216,7 @@ static int __devinit omap_dmm_probe(struct platform_device *pdev)
mutex_init(&obj->dmm_map_lock);
platform_set_drvdata(pdev, obj);
return 0;
+
clean_cdev:
cdev_del(&obj->cdev);
err_cdev:
@@ -326,14 +227,12 @@ static int __devexit omap_dmm_remove(struct platform_device *pdev)
{
struct iovmm_device *obj = platform_get_drvdata(pdev);
int major = MAJOR(omap_dmm_dev);
+
device_destroy(omap_dmm_class, MKDEV(major, obj->minor));
cdev_del(&obj->cdev);
platform_set_drvdata(pdev, NULL);
- iopgtable_clear_entry_all(obj->iommu);
- iommu_put(obj->iommu);
- free_pages((unsigned long)obj->iommu->iopgd,
- get_order(IOPGD_TABLE_SIZE));
kfree(obj);
+
return 0;
}
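Review note: the net effect of the ioctl rework above is that the switch now only routes commands, and each helper takes the raw user pointer and does its own copy_from_user, returning -EFAULT on a failed copy (the old inline blocks returned -EINVAL, the wrong errno for a bad user pointer). A sketch of the dispatch pattern, with demo_* names and the DEMO_IOCCMD command as hypothetical placeholders:

    #include <linux/types.h>
    #include <linux/uaccess.h>

    #define DEMO_IOCCMD 1           /* hypothetical command number */

    struct demo_ctx;                /* opaque per-open context */

    struct demo_args {
            u32 addr;
            u32 size;
    };

    static int demo_helper(struct demo_ctx *ctx, const void __user *uarg)
    {
            struct demo_args a;

            if (copy_from_user(&a, uarg, sizeof(a)))
                    return -EFAULT; /* bad user pointer is -EFAULT */
            /* ... validate and act on a.addr / a.size ... */
            return 0;
    }

    static int demo_ioctl(struct demo_ctx *ctx, unsigned int cmd,
                          unsigned long args)
    {
            switch (cmd) {
            case DEMO_IOCCMD:
                    return demo_helper(ctx, (const void __user *)args);
            default:
                    return -ENOTTY; /* unknown command */
            }
    }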
diff --git a/arch/arm/plat-omap/include/plat/dmm_user.h b/arch/arm/plat-omap/include/plat/dmm_user.h
index b02d82d29e4a..c231314810ec 100644
--- a/arch/arm/plat-omap/include/plat/dmm_user.h
+++ b/arch/arm/plat-omap/include/plat/dmm_user.h
@@ -53,12 +53,8 @@ struct iovmm_pool_info {
/* used to cache dma mapping information */
struct device_dma_map_info {
- /* direction of DMA in action, or DMA_NONE */
- enum dma_data_direction dir;
/* number of elements requested by us */
int num_pages;
- /* number of elements returned from dma_map_sg */
- int sg_num;
/* list of buffers used in this DMA action */
struct scatterlist *sg;
};
@@ -68,7 +64,7 @@ struct dmm_map_info {
u32 *da;
u32 num_of_buf;
u32 size;
- u32 mem_pool_id;
+ u32 pool_id;
u32 flags;
};
@@ -100,22 +96,31 @@ struct iovmm_device {
struct iommu *iommu;
const char *name;
/* List of memory pool it manages */
- struct list_head mmap_pool;
+ struct list_head mmap_pool;
struct mutex dmm_map_lock;
int minor;
struct cdev cdev;
+ int refcount;
};
/* user dmm functions */
-int dmm_user(struct iodmm_struct *obj, u32 pool_id, u32 *da,
- u32 va, size_t bytes, u32 flags);
+int dmm_user(struct iodmm_struct *obj, void __user *args);
+
void user_remove_resources(struct iodmm_struct *obj);
-int user_un_map(struct iodmm_struct *obj, u32 map_addr);
-int proc_begin_dma(struct iodmm_struct *obj, void *pva, u32 ul_size,
- enum dma_data_direction dir);
-int proc_end_dma(struct iodmm_struct *obj, void *pva, u32 ul_size,
- enum dma_data_direction dir);
-int omap_create_dmm_pool(struct iodmm_struct *obj, int pool_id, int size,
- int sa);
-int omap_delete_dmm_pool(struct iodmm_struct *obj, int pool_id);
+
+int user_un_map(struct iodmm_struct *obj, const void __user *args);
+
+int proc_begin_dma(struct iodmm_struct *obj, const void __user *args);
+
+int proc_end_dma(struct iodmm_struct *obj, const void __user *args);
+
+int omap_create_dmm_pool(struct iodmm_struct *obj, const void __user *args);
+
+int omap_delete_dmm_pools(struct iodmm_struct *obj);
+
+int program_tlb_entry(struct iodmm_struct *obj, const void __user *args);
+
+int register_mmufault(struct iodmm_struct *obj, const void __user *args);
+
+int unregister_mmufault(struct iodmm_struct *obj, const void __user *args);
#endif
diff --git a/arch/arm/plat-omap/include/plat/ipu_dev.h b/arch/arm/plat-omap/include/plat/ipu_dev.h
index 4b24f3bc14f2..c7b7959c38b2 100644
--- a/arch/arm/plat-omap/include/plat/ipu_dev.h
+++ b/arch/arm/plat-omap/include/plat/ipu_dev.h
@@ -90,6 +90,8 @@ struct ipu_pm_dev {
struct cdev cdev;
};
+extern int ipu_pm_first_dev;
+
extern int ipu_pm_module_start(unsigned rsrc);
extern int ipu_pm_module_stop(unsigned rsrc);
extern int ipu_pm_module_set_rate(unsigned rsrc,
diff --git a/arch/arm/plat-omap/include/syslink/notifydefs.h b/arch/arm/plat-omap/include/syslink/notifydefs.h
index b0df5d536168..e04b76382ff8 100644
--- a/arch/arm/plat-omap/include/syslink/notifydefs.h
+++ b/arch/arm/plat-omap/include/syslink/notifydefs.h
@@ -86,6 +86,8 @@ struct notify_object {
/* List of event callbacks registered */
struct list_head event_list[NOTIFY_MAXEVENTS];
/* List of event listeners registered */
+ struct mutex lock;
+ /* Lock for event_list */
};
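Review note: the new per-object mutex pairs with the notify.c hunks at the bottom of this merge, which stop taking the global notify_state.gate_handle around listener registration and instead lock only the owning object's event list. A sketch of the narrowed critical section, with demo_* names as hypothetical stand-ins:

    #include <linux/list.h>
    #include <linux/mutex.h>

    struct demo_object {
            struct list_head event_list;
            struct mutex lock;      /* protects event_list */
    };

    static void demo_add_listener(struct demo_object *obj,
                                  struct list_head *listener)
    {
            mutex_lock(&obj->lock); /* was: one driver-wide gate mutex */
            list_add_tail(listener, &obj->event_list);
            mutex_unlock(&obj->lock);
    }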
diff --git a/arch/arm/plat-omap/iodmm.c b/arch/arm/plat-omap/iodmm.c
index 49eb0e40a6d1..a193c70c7c1d 100644
--- a/arch/arm/plat-omap/iodmm.c
+++ b/arch/arm/plat-omap/iodmm.c
@@ -6,6 +6,9 @@
* Authors: Ramesh Gupta <grgupta@ti.com>
* Hari Kanigeri <h-kanigeri2@ti.com>
*
+ * dma_map API usage in this code is inspired from Ohad Ben-Cohen's
+ * implementation in dspbridge code.
+ *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
@@ -28,6 +31,7 @@
#include <linux/pagemap.h>
#include <linux/kernel.h>
#include <linux/genalloc.h>
+#include <linux/eventfd.h>
#include <linux/sched.h>
#include <asm/cacheflush.h>
@@ -113,10 +117,48 @@ int temp_user_dma_op(unsigned long start, unsigned long end, int op)
} while (start < end);
up_read(&mm->mmap_sem);
+
return 0;
}
#endif
+static inline struct gen_pool *get_pool_handle(struct iovmm_device *iovmm_obj,
+ int pool_id)
+{
+ struct iovmm_pool *pool;
+
+ list_for_each_entry(pool, &iovmm_obj->mmap_pool, list) {
+ if (pool->pool_id == pool_id)
+ return pool->genpool;
+ }
+ return NULL;
+}
+
+/*
+ * This function walks through the page tables to convert a userland
+ * virtual address to physical address
+ */
+static u32 __user_va2_pa(struct mm_struct *mm, u32 address)
+{
+ pgd_t *pgd;
+ pmd_t *pmd;
+ pte_t *ptep, pte;
+
+ pgd = pgd_offset(mm, address);
+ if (!(pgd_none(*pgd) || pgd_bad(*pgd))) {
+ pmd = pmd_offset(pgd, address);
+ if (!(pmd_none(*pmd) || pmd_bad(*pmd))) {
+ ptep = pte_offset_map(pmd, address);
+ if (ptep) {
+ pte = *ptep;
+ if (pte_present(pte))
+ return pte & PAGE_MASK;
+ }
+ }
+ }
+ return 0;
+}
+
/* remember mapping information */
static struct dmm_map_object *add_mapping_info(struct iodmm_struct *obj,
struct gen_pool *gen_pool, u32 va, u32 da, u32 size)
@@ -222,6 +264,13 @@ static int match_containing_map_obj(struct dmm_map_object *map_obj,
return res;
}
+/**
+ * Find the mapping object based on either the MPU virtual address or
+ * the Device virtual address. The check_va flag selects the search
+ * key: TRUE searches by MPU virtual address, FALSE by Device virtual
+ * address.
+ */
static struct dmm_map_object *find_containing_mapping(
struct iodmm_struct *obj,
u32 va, u32 da, bool check_va,
@@ -283,13 +332,36 @@ static int find_first_page_in_cache(struct dmm_map_object *map_obj,
/* Cache operation against kernel address instead of users */
static int build_dma_sg(struct dmm_map_object *map_obj, unsigned long start,
- ssize_t len, int pg_i)
+ size_t len)
{
struct page *page;
unsigned long offset;
ssize_t rest;
int ret = 0, i = 0;
- struct scatterlist *sg = map_obj->dma_info.sg;
+ unsigned long first_data_page = start >> PAGE_SHIFT;
+ unsigned long last_data_page = ((u32)(start + len - 1) >> PAGE_SHIFT);
+ /* calculating the number of pages this area spans */
+ unsigned long num_pages = last_data_page - first_data_page + 1;
+ struct scatterlist *sg;
+ int pg_i;
+
+ sg = kcalloc(num_pages, sizeof(*sg), GFP_KERNEL);
+ if (!sg) {
+ pr_err("%s: kcalloc failed\n", __func__);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ sg_init_table(sg, num_pages);
+
+ /* cleanup a previous sg allocation */
+ /* this may happen if the application doesn't signal end-of-DMA */
+ kfree(map_obj->dma_info.sg);
+
+ map_obj->dma_info.sg = sg;
+ map_obj->dma_info.num_pages = num_pages;
+
+ pg_i = find_first_page_in_cache(map_obj, start);
while (len) {
page = get_mapping_page(map_obj, pg_i);
@@ -335,16 +407,22 @@ static int memory_regain_ownership(struct device *dev,
/* calculating the number of pages this area spans */
unsigned long num_pages = last_data_page - first_data_page + 1;
struct device_dma_map_info *dma_info = &map_obj->dma_info;
+ long pg_i;
if (!dma_info->sg)
goto out;
- if (dma_info->dir != dir || dma_info->num_pages != num_pages) {
- pr_err("%s: dma info doesn't match given params\n", __func__);
+ if (num_pages > dma_info->num_pages) {
+ pr_err("%s: dma info params invalid\n", __func__);
return -EINVAL;
}
- dma_unmap_sg(dev, dma_info->sg, num_pages, dma_info->dir);
+ pg_i = find_first_page_in_cache(map_obj, start);
+ if (pg_i == -1) {
+ ret = -EFAULT;
+ goto out;
+ }
+ dma_unmap_sg(dev, (dma_info->sg), num_pages, dir);
pr_debug("%s: dma_map_sg unmapped\n", __func__);
@@ -357,127 +435,111 @@ static int memory_give_ownership(struct device *dev,
struct dmm_map_object *map_obj, unsigned long start,
ssize_t len, enum dma_data_direction dir)
{
- int pg_i, ret, sg_num;
- struct scatterlist *sg;
+ int ret, sg_num;
+ struct device_dma_map_info *dma_info = &map_obj->dma_info;
unsigned long first_data_page = start >> PAGE_SHIFT;
unsigned long last_data_page = ((u32)(start + len - 1) >> PAGE_SHIFT);
/* calculating the number of pages this area spans */
unsigned long num_pages = last_data_page - first_data_page + 1;
+ long pg_i;
pg_i = find_first_page_in_cache(map_obj, start);
- if (pg_i < 0) {
- pr_err("%s: failed to find first page in cache\n", __func__);
- ret = -EINVAL;
- goto out;
- }
-
- sg = kcalloc(num_pages, sizeof(*sg), GFP_KERNEL);
- if (!sg) {
- pr_err("%s: kcalloc failed\n", __func__);
- ret = -ENOMEM;
+ if (pg_i == -1) {
+ ret = -EFAULT;
goto out;
}
- sg_init_table(sg, num_pages);
-
- /* cleanup a previous sg allocation */
- /* this may happen if application doesn't signal for e/o DMA */
- kfree(map_obj->dma_info.sg);
-
- map_obj->dma_info.sg = sg;
- map_obj->dma_info.dir = dir;
- map_obj->dma_info.num_pages = num_pages;
-
- ret = build_dma_sg(map_obj, start, len, pg_i);
- if (ret)
- goto kfree_sg;
-
- sg_num = dma_map_sg(dev, sg, num_pages, dir);
+ sg_num = dma_map_sg(dev, (dma_info->sg), num_pages, dir);
if (sg_num < 1) {
pr_err("%s: dma_map_sg failed: %d\n", __func__, sg_num);
ret = -EFAULT;
- goto kfree_sg;
+ goto out;
}
pr_debug("%s: dma_map_sg mapped %d elements\n", __func__, sg_num);
- map_obj->dma_info.sg_num = sg_num;
return 0;
-
-kfree_sg:
- kfree(sg);
- map_obj->dma_info.sg = NULL;
out:
return ret;
}
#endif
-int proc_begin_dma(struct iodmm_struct *obj, void *pva, u32 ul_size,
- enum dma_data_direction dir)
+int proc_begin_dma(struct iodmm_struct *obj, const void __user *args)
{
int status = 0;
+ struct dmm_dma_info dma_info;
#ifdef CONFIG_DMM_DMA_API
- u32 va_align;
struct dmm_map_object *map_obj;
- struct device *dev = obj->iovmm->iommu->dev;
- va_align = round_down((u32)pva, PAGE_SIZE);
+ struct device *dev;
+
+ if (copy_from_user(&dma_info, (void __user *)args,
+ sizeof(struct dmm_dma_info)))
+ return -EFAULT;
+ dev = obj->iovmm->iommu->dev;
mutex_lock(&obj->iovmm->dmm_map_lock);
pr_debug("%s: addr 0x%x, size 0x%x, type %d\n", __func__,
- (u32)va_align,
- ul_size, dir);
+ (u32)dma_info.pva,
+ dma_info.ul_size, dma_info.dir);
/* find requested memory are in cached mapping information */
- map_obj = find_containing_mapping(obj, (u32) va_align, 0, true,
- ul_size);
+ map_obj = find_containing_mapping(obj, (u32)dma_info.pva, 0, true,
+ dma_info.ul_size);
if (!map_obj) {
pr_err("%s: find_containing_mapping failed\n", __func__);
status = -EFAULT;
goto err_out;
}
- if (memory_give_ownership(dev, map_obj, (u32)pva, ul_size, dir)) {
+ if (memory_give_ownership(dev, map_obj, (u32)dma_info.pva,
+ dma_info.ul_size, dma_info.dir)) {
pr_err("%s: InValid address parameters %x %x\n",
- __func__, va_align, ul_size);
+ __func__, (u32)dma_info.pva, dma_info.ul_size);
status = -EFAULT;
}
err_out:
mutex_unlock(&obj->iovmm->dmm_map_lock);
#else
-
- u32 end = (u32)pva + ul_size;
- status = temp_user_dma_op((u32)pva, end, 3);
+ if (copy_from_user(&dma_info, (void __user *)args,
+ sizeof(struct dmm_dma_info)))
+ return -EFAULT;
+ status = temp_user_dma_op((u32)dma_info.pva,
+ (u32)dma_info.pva + dma_info.ul_size, 3);
#endif
return status;
}
-int proc_end_dma(struct iodmm_struct *obj, void *pva, u32 ul_size,
- enum dma_data_direction dir)
+int proc_end_dma(struct iodmm_struct *obj, const void __user *args)
{
int status = 0;
+ struct dmm_dma_info dma_info;
#ifdef CONFIG_DMM_DMA_API
- u32 va_align;
+ struct device *dev;
struct dmm_map_object *map_obj;
- struct device *dev = obj->iovmm->iommu->dev;
- va_align = round_down((u32)pva, PAGE_SIZE);
+
+ if (copy_from_user(&dma_info, (void __user *)args,
+ sizeof(struct dmm_dma_info)))
+ return -EFAULT;
+ dev = obj->iovmm->iommu->dev;
pr_debug("%s: addr 0x%x, size 0x%x, type %d\n", __func__,
- (u32)va_align,
- ul_size, dir);
+ (u32)dma_info.pva,
+ dma_info.ul_size, dma_info.dir);
mutex_lock(&obj->iovmm->dmm_map_lock);
/* find requested memory are in cached mapping information */
- map_obj = find_containing_mapping(obj, (u32) va_align, 0, true,
- ul_size);
+ map_obj = find_containing_mapping(obj, (u32)dma_info.pva, 0, true,
+ dma_info.ul_size);
if (!map_obj) {
pr_err("%s: find_containing_mapping failed\n", __func__);
status = -EFAULT;
goto err_out;
}
- if (memory_regain_ownership(dev, map_obj, (u32)pva, ul_size, dir)) {
+ if (memory_regain_ownership(dev, map_obj, (u32)dma_info.pva,
+ dma_info.ul_size, dma_info.dir)) {
pr_err("%s: InValid address parameters %p %x\n",
- __func__, pva, ul_size);
+ __func__, dma_info.pva, dma_info.ul_size);
status = -EFAULT;
goto err_out;
}
@@ -485,13 +547,117 @@ int proc_end_dma(struct iodmm_struct *obj, void *pva, u32 ul_size,
err_out:
mutex_unlock(&obj->iovmm->dmm_map_lock);
#else
- u32 end = (u32)pva + ul_size;
- status = temp_user_dma_op((u32)pva, end, 1);
+ if (copy_from_user(&dma_info, (void __user *)args,
+ sizeof(struct dmm_dma_info)))
+ return -EFAULT;
+ status = temp_user_dma_op((u32)dma_info.pva,
+ (u32)dma_info.pva + dma_info.ul_size, 1);
#endif
return status;
}
/**
+ * user_to_device_unmap() - unmaps a Device virtual buffer.
+ * @mmu: Pointer to iommu handle.
+ * @da: DSP address
+ * @size: Size of the buffer to unmap
+ *
+ * This function unmaps a user space buffer from the DSP virtual
+ * address space.
+ */
+static int user_to_device_unmap(struct iommu *mmu, u32 da, unsigned size)
+{
+ unsigned total = size;
+ unsigned start = da;
+
+ while (total > 0) {
+ size_t bytes;
+ bytes = iopgtable_clear_entry(mmu, start);
+ if (bytes == 0)
+ bytes = PAGE_SIZE;
+ else
+ dev_dbg(mmu->dev, "%s: unmap 0x%x 0x%x\n",
+ __func__, start, bytes);
+ BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));
+ total -= bytes;
+ start += bytes;
+ }
+ return 0;
+}
+
+static int __user_un_map(struct iodmm_struct *obj, u32 map_addr)
+{
+ int status = 0;
+ u32 va_align;
+ u32 size_align;
+ struct dmm_map_object *map_obj;
+ int i;
+ struct page *pg;
+
+ va_align = round_down(map_addr, PAGE_SIZE);
+
+ mutex_lock(&obj->iovmm->dmm_map_lock);
+ /*
+ * Update DMM structures. Get the size to unmap.
+ * This function returns error if the VA is not mapped
+ */
+ /* find requested memory area in cached mapping information */
+ map_obj = find_containing_mapping(obj, 0, map_addr, false, 0);
+ if (!map_obj)
+ goto err;
+ size_align = map_obj->size;
+ /* Remove mapping from the page tables. */
+ status = user_to_device_unmap(obj->iovmm->iommu, va_align,
+ size_align);
+ if (status)
+ goto err;
+
+ i = size_align/PAGE_SIZE;
+ while (i--) {
+ pg = map_obj->pages[i];
+ if (pg && pfn_valid(page_to_pfn(pg))) {
+ if (page_count(pg) < 1)
+ pr_info("%s UNMAP FAILURE !!!\n", __func__);
+ else {
+ SetPageDirty(pg);
+ page_cache_release(pg);
+ }
+ }
+ }
+ /*
+ * A successful unmap should be followed by removal of map_obj
+ * from dmm_map_list, so that mapped memory resource tracking
+ * remains uptodate
+ */
+ remove_mapping_information(obj, map_obj->da, map_obj->size);
+err:
+ mutex_unlock(&obj->iovmm->dmm_map_lock);
+ return status;
+}
+
+
+/**
+ * user_un_map - Removes User's mapped address
+ * @obj: target dmm object
+ * @args: Mapped address that needs to be unmapped
+ *
+ * Removes the user's DMM buffer mapping
+ **/
+int user_un_map(struct iodmm_struct *obj, const void __user *args)
+{
+ int status = 0;
+ u32 map_addr;
+
+ if (copy_from_user(&map_addr, (void __user *)args, sizeof(u32)))
+ return -EFAULT;
+
+ status = __user_un_map(obj, map_addr);
+ if (status)
+ pr_err("%s:Unmap of buffer 0x%x failedn", __func__, map_addr);
+
+ return status;
+}
+
+/**
* user_to_device_map() - maps user to dsp virtual address
* @mmu: Pointer to iommu handle.
* @uva: Virtual user space address.
@@ -562,19 +728,8 @@ static int user_to_device_map(struct iommu *mmu, u32 uva, u32 da, u32 size,
break;
}
}
- return res;
-}
-
-static inline struct gen_pool *get_pool_handle(struct iovmm_device *iovmm_obj,
- int pool_id)
-{
- struct iovmm_pool *pool;
- list_for_each_entry(pool, &iovmm_obj->mmap_pool, list) {
- if (pool->pool_id == pool_id)
- return pool->genpool;
- }
- return NULL;
+ return res;
}
/**
@@ -597,7 +752,6 @@ static int phys_to_device_map(struct iodmm_struct *obj,
struct dmm_map_object *dmm_obj;
int da;
u32 all_bits;
- u32 num_bytes = bytes;
int err = 0;
u32 pg_size[] = {SZ_16M, SZ_1M, SZ_64K, SZ_4K};
int size_flag[] = {MMU_CAM_PGSZ_16M, MMU_CAM_PGSZ_1M,
@@ -605,7 +759,7 @@ static int phys_to_device_map(struct iodmm_struct *obj,
int i;
struct gen_pool *gen_pool;
- if (!num_bytes) {
+ if (!bytes) {
err = -EINVAL;
goto exit;
}
@@ -620,34 +774,34 @@ static int phys_to_device_map(struct iodmm_struct *obj,
gen_pool = get_pool_handle(obj->iovmm, pool_id);
if (gen_pool) {
da = gen_pool_alloc(gen_pool, bytes);
- *mapped_addr = (da | ((u32)pa & (PAGE_SIZE - 1)));
+ *mapped_addr = (da | (pa & (PAGE_SIZE - 1)));
} else {
err = -EFAULT;
goto exit;
}
}
- dmm_obj = add_mapping_info(obj, gen_pool, pa, *mapped_addr, num_bytes);
+ dmm_obj = add_mapping_info(obj, gen_pool, pa, *mapped_addr, bytes);
if (dmm_obj == NULL) {
err = -ENODEV;
goto err_add_map;
}
- while (num_bytes) {
+ while (bytes) {
/*
* To find the max. page size with which both PA & VA are
* aligned
*/
all_bits = pa | da;
for (i = 0; i < 4; i++) {
- if ((num_bytes >= pg_size[i]) && ((all_bits &
+ if ((bytes >= pg_size[i]) && ((all_bits &
(pg_size[i] - 1)) == 0)) {
iotlb_init_entry(&e, da, pa,
size_flag[i] |
MMU_RAM_ENDIAN_LITTLE |
MMU_RAM_ELSZ_32);
iopgtable_store_entry(obj->iovmm->iommu, &e);
- num_bytes -= pg_size[i];
+ bytes -= pg_size[i];
da += pg_size[i];
pa += pg_size[i];
break;
@@ -663,86 +817,32 @@ exit:
}
/**
- * user_to_device_unmap() - unmaps Device virtual buffer.
- * @mmu: Pointer to iommu handle.
- * @da DSP address
- *
- * This function unmaps a user space buffer into DSP virtual address.
- *
- */
-static int user_to_device_unmap(struct iommu *mmu, u32 da, unsigned size)
-{
- unsigned total = size;
- unsigned start = da;
-
- while (total > 0) {
- size_t bytes;
- bytes = iopgtable_clear_entry(mmu, start);
- if (bytes == 0)
- bytes = PAGE_SIZE;
- else
- dev_dbg(mmu->dev, "%s: unmap 0x%x 0x%x\n",
- __func__, start, bytes);
- BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));
- total -= bytes;
- start += bytes;
- }
- return 0;
-}
-
-/*
- * ======== user_va2_pa ========
- * Purpose:
- * This function walks through the page tables to convert a userland
- * virtual address to physical address
- */
-static u32 user_va2_pa(struct mm_struct *mm, u32 address)
-{
- pgd_t *pgd;
- pmd_t *pmd;
- pte_t *ptep, pte;
-
- pgd = pgd_offset(mm, address);
- if (!(pgd_none(*pgd) || pgd_bad(*pgd))) {
- pmd = pmd_offset(pgd, address);
- if (!(pmd_none(*pmd) || pmd_bad(*pmd))) {
- ptep = pte_offset_map(pmd, address);
- if (ptep) {
- pte = *ptep;
- if (pte_present(pte))
- return pte & PAGE_MASK;
- }
- }
- }
- return 0;
-}
-
-/**
* dmm_user - Maps user buffer to Device address
* @obj: target dmm object
- * @pool_id: DMM pool id
- * @da: Mapped Device Address
- * @va: User virtual Address
- * @bytes Size of the buffer to be mapped
- * flags flags on how to interpret user buffer
+ * @args: DMM map information
*
* Maps given user buffer to Device address
**/
-int dmm_user(struct iodmm_struct *obj, u32 pool_id, u32 *da,
- u32 va, size_t bytes, u32 flags)
+int dmm_user(struct iodmm_struct *obj, void __user *args)
{
struct gen_pool *gen_pool;
struct dmm_map_object *dmm_obj;
struct iovmm_device *iovmm_obj = obj->iovmm;
- u32 pa_align, da_align, size_align, tmp_addr;
+ u32 addr_align, da_align, size_align, tmp_addr;
int err = 0;
int i, num_of_pages;
struct page *pg;
struct vm_area_struct *vma;
struct mm_struct *mm = current->mm;
u32 io_addr;
+ struct dmm_map_info map_info;
struct iotlb_entry e;
+
+ if (copy_from_user(&map_info, (void __user *)args,
+ sizeof(struct dmm_map_info)))
+ return -EFAULT;
+
/*
* Important Note: va is mapped from user application process
* to current process - it must lie completely within the current
@@ -751,8 +851,9 @@ int dmm_user(struct iodmm_struct *obj, u32 pool_id, u32 *da,
down_read(&mm->mmap_sem);
/* Calculate the page-aligned PA, VA and size */
- pa_align = round_down((u32) va, PAGE_SIZE);
- size_align = round_up(bytes + va - pa_align, PAGE_SIZE);
+ addr_align = round_down((u32) map_info.mpu_addr, PAGE_SIZE);
+ size_align = round_up(map_info.size + map_info.mpu_addr - addr_align,
+ PAGE_SIZE);
mutex_lock(&iovmm_obj->dmm_map_lock);
@@ -761,18 +862,19 @@ int dmm_user(struct iodmm_struct *obj, u32 pool_id, u32 *da,
* specified if pool_id as -1, so the da is interpreted
* as the Device Address.
*/
- if (flags == DMM_DA_PHYS) {
- err = phys_to_device_map(obj, pool_id, da, pa_align,
- size_align, flags);
- goto err;
+ if (map_info.flags == DMM_DA_PHYS) {
+ err = phys_to_device_map(obj, map_info.pool_id, map_info.da,
+ addr_align, size_align, map_info.flags);
+ goto exit;
}
- vma = find_vma(mm, va);
+ vma = find_vma(mm, map_info.mpu_addr);
if (vma) {
dev_dbg(iovmm_obj->iommu->dev,
"VMAfor UserBuf: ul_mpu_addr=%x, ul_num_bytes=%x, "
- "vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", va,
- bytes, vma->vm_start, vma->vm_end,
+ "vm_start=%lx, vm_end=%lx, vm_flags=%lx\n",
+ map_info.mpu_addr,
+ map_info.size, vma->vm_start, vma->vm_end,
vma->vm_flags);
}
/*
@@ -780,55 +882,57 @@ int dmm_user(struct iodmm_struct *obj, u32 pool_id, u32 *da,
* spread across several VMAs. So loop through and check if the entire
* user buffer is covered
*/
- while ((vma) && (va + bytes > vma->vm_end)) {
+ while ((vma) && (map_info.mpu_addr + map_info.size > vma->vm_end)) {
/* jump to the next VMA region */
vma = find_vma(mm, vma->vm_end + 1);
dev_dbg(iovmm_obj->iommu->dev,
"VMA for UserBuf ul_mpu_addr=%x ul_num_bytes=%x, "
- "vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", va,
- bytes, vma->vm_start, vma->vm_end,
+ "vm_start=%lx, vm_end=%lx, vm_flags=%lx\n",
+ map_info.mpu_addr,
+ map_info.size, vma->vm_start, vma->vm_end,
vma->vm_flags);
}
if (!vma) {
pr_err("%s: Failed to get VMA region for 0x%x (%d)\n",
- __func__, va, bytes);
+ __func__, map_info.mpu_addr, map_info.size);
err = -EINVAL;
- goto err;
+ goto exit;
}
/*
* If user provided anonymous address, then don't allocate it from
* from genpool
*/
- if (flags == DMM_DA_ANON) {
+ if (map_info.flags == DMM_DA_ANON) {
gen_pool = NULL;
- da_align = round_down(*da, PAGE_SIZE);
+ da_align = round_down((u32)map_info.da, PAGE_SIZE);
} else {
/* search through the list of available pools to
* pool handle
*/
- gen_pool = get_pool_handle(iovmm_obj, pool_id);
+ gen_pool = get_pool_handle(iovmm_obj, map_info.pool_id);
if (gen_pool)
da_align = gen_pool_alloc(gen_pool, size_align);
else {
err = -EFAULT;
- goto err;
+ goto exit;
}
}
/* Mapped address = MSB of VA | LSB of PA */
- tmp_addr = (da_align | ((u32)va & (PAGE_SIZE - 1)));
- dmm_obj = add_mapping_info(obj, gen_pool, pa_align, tmp_addr,
+ tmp_addr = (da_align | ((u32)map_info.mpu_addr & (PAGE_SIZE - 1)));
+ dmm_obj = add_mapping_info(obj, gen_pool, map_info.mpu_addr, tmp_addr,
size_align);
- *da = tmp_addr;
if (!dmm_obj)
- goto err;
+ goto exit;
+
+ *map_info.da = tmp_addr;
/* Mapping the IO buffers */
if (vma->vm_flags & VM_IO) {
num_of_pages = size_align/PAGE_SIZE;
for (i = 0; i < num_of_pages; i++) {
- io_addr = user_va2_pa(current->mm, pa_align);
+ io_addr = __user_va2_pa(current->mm, addr_align);
pg = phys_to_page(io_addr);
iotlb_init_entry(&e, da_align, io_addr,
@@ -837,82 +941,47 @@ int dmm_user(struct iodmm_struct *obj, u32 pool_id, u32 *da,
MMU_RAM_ELSZ_32);
iopgtable_store_entry(obj->iovmm->iommu, &e);
da_align += PAGE_SIZE;
- pa_align += PAGE_SIZE;
+ addr_align += PAGE_SIZE;
dmm_obj->pages[i] = pg;
}
err = 0;
- goto err;
+ goto exit;
}
/* Mapping the Userspace buffer */
- err = user_to_device_map(iovmm_obj->iommu, pa_align,
+ err = user_to_device_map(iovmm_obj->iommu, addr_align,
da_align, size_align, dmm_obj->pages);
- if (err)
- remove_mapping_information(obj, tmp_addr, size_align);
-err:
+ if (err) {
+ /* clean the entries that were mapped */
+ __user_un_map(obj, tmp_addr);
+ goto exit;
+ }
+#ifdef CONFIG_DMM_DMA_API
+ /*
+ * Build the SG list that would be required for dma map and
+ * unmap APIs
+ */
+ err = build_dma_sg(dmm_obj, map_info.mpu_addr, map_info.size);
+ if (!err) {
+ /*
+ * calling dma_map_sg (cache flush) is essential for
+ * dma_unmap_sg to work, since the sg->dma_address required
+ * by dma_unmap_sg is built during the dma_map_sg call.
+ */
+ err = memory_give_ownership(iovmm_obj->iommu->dev, dmm_obj,
+ map_info.mpu_addr, map_info.size, DMA_BIDIRECTIONAL);
+ }
+#endif
+
+exit:
+ copy_to_user((void __user *)args, &map_info,
+ sizeof(struct dmm_map_info));
mutex_unlock(&iovmm_obj->dmm_map_lock);
up_read(&mm->mmap_sem);
return err;
}
/**
- * user_un_map - Removes User's mapped address
- * @obj: target dmm object
- * @map_addr Mapped address that needs to be unmapped
- *
- * removes user's dmm buffer mapping
- **/
-int user_un_map(struct iodmm_struct *obj, u32 map_addr)
-{
- int status = 0;
- u32 va_align;
- u32 size_align;
- struct dmm_map_object *map_obj;
- int i;
- struct page *pg;
-
- va_align = round_down(map_addr, PAGE_SIZE);
-
- mutex_lock(&obj->iovmm->dmm_map_lock);
- /*
- * Update DMM structures. Get the size to unmap.
- * This function returns error if the VA is not mapped
- */
- /* find requested memory are in cached mapping information */
- map_obj = find_containing_mapping(obj, 0, map_addr, false, 0);
- if (!map_obj)
- goto err;
- size_align = map_obj->size;
- /* Remove mapping from the page tables. */
- status = user_to_device_unmap(obj->iovmm->iommu, va_align,
- size_align);
- if (status)
- goto err;
-
- i = size_align/PAGE_SIZE;
- while (i--) {
- pg = map_obj->pages[i];
- if (pg && pfn_valid(page_to_pfn(pg))) {
- if (page_count(pg) < 1)
- pr_info("%s UNMAP FAILURE !!!\n", __func__);
- else {
- SetPageDirty(pg);
- page_cache_release(pg);
- }
- }
- }
- /*
- * A successful unmap should be followed by removal of map_obj
- * from dmm_map_list, so that mapped memory resource tracking
- * remains uptodate
- */
- remove_mapping_information(obj, map_obj->da, map_obj->size);
-err:
- mutex_unlock(&obj->iovmm->dmm_map_lock);
- return status;
-}
-
-/**
* user_remove_resources - Removes User's dmm resources
* @obj: target dmm object
*
@@ -926,7 +995,7 @@ void user_remove_resources(struct iodmm_struct *obj)
/* Free DMM mapped memory resources */
list_for_each_entry_safe(map_obj, temp_map, &obj->map_list, link) {
- status = user_un_map(obj, map_obj->da);
+ status = __user_un_map(obj, map_obj->da);
if (status) {
pr_err("%s: proc_un_map failed!"
" status = 0x%x\n", __func__, status);
@@ -937,39 +1006,39 @@ void user_remove_resources(struct iodmm_struct *obj)
/**
* omap_create_dmm_pool - Create DMM pool
* @obj: target dmm object
- * @pool_id pool id to assign to the pool
- * @size Size of the pool
- * @sa Starting Address of the Virtual pool
+ * @args: pool information
**/
-int omap_create_dmm_pool(struct iodmm_struct *obj, int pool_id, int size,
- int sa)
+int omap_create_dmm_pool(struct iodmm_struct *obj, const void __user *args)
{
struct iovmm_pool *pool;
struct iovmm_device *iovmm = obj->iovmm;
+ struct iovmm_pool_info pool_info;
+
+ if (copy_from_user(&pool_info, args, sizeof(struct iovmm_pool_info)))
+ return -EFAULT;
- pool = kzalloc(sizeof(struct iovmm_pool), GFP_ATOMIC);
+ pool = kzalloc(sizeof(struct iovmm_pool), GFP_KERNEL);
if (!pool)
- goto err_out;
+ return -ENOMEM;
+
+ pool->pool_id = pool_info.pool_id;
+ pool->da_begin = pool_info.da_begin;
+ pool->da_end = pool_info.da_begin + pool_info.size;
- pool->pool_id = pool_id;
- pool->da_begin = sa;
- pool->da_end = sa + size;
pool->genpool = gen_pool_create(12, -1);
- gen_pool_add(pool->genpool, pool->da_begin, size, -1);
+ gen_pool_add(pool->genpool, pool->da_begin, pool_info.size, -1);
+
INIT_LIST_HEAD(&pool->list);
list_add_tail(&pool->list, &iovmm->mmap_pool);
- return 0;
-err_out:
- return -ENOMEM;
+ return 0;
}
/**
- * omap_delete_dmm_pool - Delete DMM pool
+ * omap_delete_dmm_pools - Delete all DMM pools
* @obj: target dmm object
- * @pool_id pool id to delete
**/
-int omap_delete_dmm_pool(struct iodmm_struct *obj, int pool_id)
+int omap_delete_dmm_pools(struct iodmm_struct *obj)
{
struct iovmm_pool *pool;
struct iovmm_device *iovmm_obj = obj->iovmm;
@@ -977,14 +1046,88 @@ int omap_delete_dmm_pool(struct iodmm_struct *obj, int pool_id)
list_for_each_safe(_pool, _next_pool, &iovmm_obj->mmap_pool) {
pool = list_entry(_pool, struct iovmm_pool, list);
- if (pool->pool_id == pool_id) {
- gen_pool_destroy(pool->genpool);
- list_del(&pool->list);
- kfree(pool);
- return 0;
+ gen_pool_destroy(pool->genpool);
+ list_del(&pool->list);
+ kfree(pool);
+ }
+
+ return 0;
+}
+
+/**
+ * register_mmufault - Register for MMU fault notification
+ * @obj: target dmm object
+ * @args: Eventfd information
+ *
+ * Registers for MMU fault event notification
+ **/
+int register_mmufault(struct iodmm_struct *obj, const void __user *args)
+{
+ int fd;
+ struct iommu_event_ntfy *fd_reg;
+
+ if (copy_from_user(&fd, args, sizeof(int)))
+ return -EFAULT;
+
+ fd_reg = kzalloc(sizeof(struct iommu_event_ntfy), GFP_KERNEL);
+ if (!fd_reg)
+ return -ENOMEM;
+ fd_reg->fd = fd;
+ fd_reg->evt_ctx = eventfd_ctx_fdget(fd);
+ INIT_LIST_HEAD(&fd_reg->list);
+ spin_lock_irq(&obj->iovmm->iommu->event_lock);
+ list_add_tail(&fd_reg->list, &obj->iovmm->iommu->event_list);
+ spin_unlock_irq(&obj->iovmm->iommu->event_lock);
+
+ return 0;
+}
+
+/**
+ * unregister_mmufault - Unregister for MMU fault notification
+ * @obj: target dmm object
+ * @args: Eventfd information
+ *
+ * Unregisters from MMU fault event notification
+ **/
+int unregister_mmufault(struct iodmm_struct *obj, const void __user *args)
+{
+ int fd;
+ struct iommu_event_ntfy *fd_reg, *temp_reg;
+
+ if (copy_from_user(&fd, (void __user *)args, sizeof(int)))
+ return -EFAULT;
+
+ /* Remove the eventfd registration matching fd */
+ spin_lock_irq(&obj->iovmm->iommu->event_lock);
+ list_for_each_entry_safe(fd_reg, temp_reg,
+ &obj->iovmm->iommu->event_list, list) {
+ if (fd_reg->fd == fd) {
+ list_del(&fd_reg->list);
+ kfree(fd_reg);
}
}
- return -ENODEV;
+ spin_unlock_irq(&obj->iovmm->iommu->event_lock);
+
+ return 0;
+}
+
+/**
+ * program_tlb_entry - Program the IOMMU TLB entry
+ * @obj: target dmm object
+ * @args: TLB entry information
+ *
+ * This function loads the TLB entry that the user specifies.
+ * This function should be used only during remote Processor
+ * boot time.
+ **/
+int program_tlb_entry(struct iodmm_struct *obj, const void __user *args)
+{
+ struct iotlb_entry e;
+ int ret;
+
+ if (copy_from_user(&e, args, sizeof(struct iotlb_entry)))
+ return -EFAULT;
+
+ ret = load_iotlb_entry(obj->iovmm->iommu, &e);
+ return ret;
}
MODULE_LICENSE("GPL v2");
diff --git a/arch/arm/plat-omap/iommu.c b/arch/arm/plat-omap/iommu.c
index d728af8aec6b..abc177d75072 100644
--- a/arch/arm/plat-omap/iommu.c
+++ b/arch/arm/plat-omap/iommu.c
@@ -400,10 +400,10 @@ u32 iommu_save_tlb_entries(struct iommu *obj)
for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr_tmp) {
iotlb_cr_to_e(&cr_tmp, e);
+ dev_dbg(obj->dev, "%s: %08x %08x %d %d %d\n", __func__, e->da,
+ e->pa, e->pgsz, e->prsvd,
+ e->valid);
e++;
-
- dev_dbg(obj->dev, "%s: [%02x] %08x %08x\n", __func__,
- i, cr_tmp.cam, cr_tmp.ram);
}
return 0;
@@ -429,17 +429,17 @@ u32 iommu_restore_tlb_entries(struct iommu *obj)
goto error;
for (i = 0; i < obj->nr_tlb_entries; i++) {
- if (!e->valid) {
+ if (!e->prsvd) {
e++;
continue;
}
+ dev_dbg(obj->dev, "%s: %08x %08x %d %d %d\n", __func__, e->da,
+ e->pa, e->pgsz, e->prsvd,
+ e->valid);
status = load_iotlb_entry(obj, e);
if (status)
goto error;
e++;
-
- dev_dbg(obj->dev, "%s: [%02x] %08x\n", __func__,
- i, e->pa);
}
return 0;
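Review note on the behavioral change in iommu_restore_tlb_entries: after hibernation only entries flagged preserved (prsvd) are re-loaded, instead of every valid one; non-preserved mappings are rebuilt on demand. A condensed form of the new loop, using the iommu types from plat/iommu.h:

    #include <plat/iommu.h>

    /* re-load only the preserved TLB entries after a context restore */
    static int demo_restore_tlb(struct iommu *obj, struct iotlb_entry *e,
                                int nr_entries)
    {
            int i;

            for (i = 0; i < nr_entries; i++, e++) {
                    if (!e->prsvd)  /* was: skipped only on !e->valid */
                            continue;
                    if (load_iotlb_entry(obj, e))
                            return -EINVAL; /* abort on first failure */
            }
            return 0;
    }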
diff --git a/arch/arm/plat-omap/remoteproc.c b/arch/arm/plat-omap/remoteproc.c
index 6b1e28de8493..34b32becf490 100644
--- a/arch/arm/plat-omap/remoteproc.c
+++ b/arch/arm/plat-omap/remoteproc.c
@@ -42,7 +42,7 @@ static dev_t omap_rproc_dev;
static atomic_t num_of_rprocs;
-void rproc_eventfd_ntfy(struct omap_rproc *obj, int event)
+static void rproc_eventfd_ntfy(struct omap_rproc *obj, int event)
{
struct omap_rproc_ntfy *fd_reg;
@@ -177,7 +177,8 @@ static inline int rproc_get_state(struct omap_rproc *rproc)
return pdata->ops->get_state(rproc->dev);
}
-int rproc_reg_user_event(struct omap_rproc *rproc, const void __user *arg)
+static int rproc_reg_user_event(struct omap_rproc *rproc,
+ const void __user *arg)
{
struct omap_rproc_ntfy *fd_reg;
int state;
@@ -224,7 +225,8 @@ int rproc_reg_user_event(struct omap_rproc *rproc, const void __user *arg)
return 0;
}
-int rproc_unreg_user_event(struct omap_rproc *rproc, const void __user *arg)
+static int rproc_unreg_user_event(struct omap_rproc *rproc,
+ const void __user *arg)
{
struct omap_rproc_ntfy *fd_reg, *temp_reg;
struct omap_rproc_reg_event_args args;
diff --git a/drivers/dsp/syslink/Kconfig b/drivers/dsp/syslink/Kconfig
index 72ef7da40330..fdd80a92e977 100644
--- a/drivers/dsp/syslink/Kconfig
+++ b/drivers/dsp/syslink/Kconfig
@@ -88,4 +88,10 @@ config DMM_DMA_API
cache operations on the userspace buffers. This option would be
made default once the code is stabilized
+config TILER_PID_KILL_NOTIFICATIONS
+ bool "TILER Event Notification"
+ default n
+ help
+ This is a temporary hack that gates TILER device-close
+ notifications on PID death; when unset, the notifier
+ call is skipped.
endif
diff --git a/drivers/dsp/syslink/devh/44xx/devh44xx.c b/drivers/dsp/syslink/devh/44xx/devh44xx.c
index 272ea565a993..6d589cf8e0b6 100644
--- a/drivers/dsp/syslink/devh/44xx/devh44xx.c
+++ b/drivers/dsp/syslink/devh/44xx/devh44xx.c
@@ -379,7 +379,11 @@ static int devh44xx_sysm3_tiler_notifier_call(struct notifier_block *nb,
switch ((int)val) {
case TILER_DEVICE_CLOSE:
+#if defined(CONFIG_TILER_PID_KILL_NOTIFICATIONS)
return devh44xx_notifier_call(nb, val, v, pdata);
+#else
+ return 0;
+#endif
default:
return 0;
}
@@ -393,7 +397,11 @@ static int devh44xx_appm3_tiler_notifier_call(struct notifier_block *nb,
switch ((int)val) {
case TILER_DEVICE_CLOSE:
+#if defined(CONFIG_TILER_PID_KILL_NOTIFICATIONS)
return devh44xx_notifier_call(nb, val, v, pdata);
+#else
+ return 0;
+#endif
default:
return 0;
}
@@ -407,7 +415,11 @@ static int devh44xx_tesla_tiler_notifier_call(struct notifier_block *nb,
switch ((int)val) {
case TILER_DEVICE_CLOSE:
+#if defined(CONFIG_TILER_PID_KILL_NOTIFICATIONS)
return devh44xx_notifier_call(nb, val, v, pdata);
+#else
+ return 0;
+#endif
default:
return 0;
}
diff --git a/drivers/dsp/syslink/ipu_pm/ipu_pm.c b/drivers/dsp/syslink/ipu_pm/ipu_pm.c
index a5a25b1783c9..e9868c8016ea 100644
--- a/drivers/dsp/syslink/ipu_pm/ipu_pm.c
+++ b/drivers/dsp/syslink/ipu_pm/ipu_pm.c
@@ -65,10 +65,6 @@
* Macros and types
* ============================================================================
*/
-#define A9 3
-#define SYS_M3 2
-#define APP_M3 1
-#define TESLA 0
#define HW_AUTO 3
#define CM_DUCATI_M3_CLKSTCTRL 0x4A008900
#define SL2_RESOURCE 10
@@ -83,6 +79,8 @@
#define SYSM3_IDLE_FLAG_PHY_ADDR 0x9E0502D8
#define APPM3_IDLE_FLAG_PHY_ADDR 0x9E0502DC
+#define _is_valid_event(e) ((PM_FIRST_EVENT <= (e) && (e) <= PM_LAST_EVENT) ? 1 : 0)
+
#define NUM_IDLE_CORES ((__raw_readl(appm3Idle) << 1) + \
(__raw_readl(sysm3Idle)))
@@ -253,6 +251,7 @@ static struct omap_rproc *app_rproc;
static struct omap_mbox *ducati_mbox;
static struct iommu *ducati_iommu;
static bool first_time = 1;
+static bool mpu_hib_ipu;
/* static struct omap_dm_timer *pm_gpt; */
/* Ducati Interrupt Capable Gptimers */
@@ -285,6 +284,7 @@ static struct ipu_pm_params pm_params = {
.pm_iva_hd_counter = 0,
.pm_ivaseq0_counter = 0,
.pm_ivaseq1_counter = 0,
+ .pm_sl2if_counter = 0,
.pm_l3_bus_counter = 0,
.pm_mpu_counter = 0,
.pm_sdmachan_counter = 0,
@@ -608,22 +608,20 @@ void ipu_pm_callback(u16 proc_id, u16 line_id, u32 event_id,
EXPORT_SYMBOL(ipu_pm_callback);
-/*
- Function for PM notifications Callback
- *
+/* Function for the PM notifications callback.
+ * This function receives an event coming from the
+ * remote proc as an ack.
+ * Posts the semaphore based on the event type (payload).
+ * If PM_HIBERNATE is received, save_ctx is triggered
+ * in order to put the remote proc in reset.
*/
void ipu_pm_notify_callback(u16 proc_id, u16 line_id, u32 event_id,
uint *arg, u32 payload)
{
- /**
- * Post semaphore based in eventType (payload);
- * IPU has alreay finished the process for the
- * notification
- */
- /* Get the payload */
struct ipu_pm_object *handle;
union message_slicer pm_msg;
struct ipu_pm_params *params;
+ enum pm_event_type event;
int retval;
/* get the handle to proper ipu pm object */
@@ -635,32 +633,27 @@ void ipu_pm_notify_callback(u16 proc_id, u16 line_id, u32 event_id,
return;
pm_msg.whole = payload;
- if (pm_msg.fields.msg_type == PM_NOTIFY_HIBERNATE) {
- /* Remote proc requested hibernate */
- /* Remote Proc is ready to hibernate */
+ /* get the event type sent by remote proc */
+ event = pm_msg.fields.msg_subtype;
+ if (!_is_valid_event(event))
+ goto error;
+ if (event == PM_HIBERNATE) {
+ /* Remote proc is ready to hibernate;
+ * PM_HIBERNATE is a one-way notification,
+ * remote proc to host proc
+ */
+ pr_debug("Remote Proc is ready to hibernate\n");
retval = ipu_pm_save_ctx(proc_id);
if (retval)
- pr_info("Unable to stop proc %d\n", proc_id);
+ pr_err("Unable to stop proc %d\n", proc_id);
} else {
- switch (pm_msg.fields.msg_subtype) {
- case PM_SUSPEND:
- handle->pm_event[PM_SUSPEND].pm_msg = payload;
- up(&handle->pm_event[PM_SUSPEND].sem_handle);
- break;
- case PM_RESUME:
- handle->pm_event[PM_RESUME].pm_msg = payload;
- up(&handle->pm_event[PM_RESUME].sem_handle);
- break;
- case PM_HIBERNATE:
- handle->pm_event[PM_HIBERNATE].pm_msg = payload;
- up(&handle->pm_event[PM_HIBERNATE].sem_handle);
- break;
- case PM_PID_DEATH:
- handle->pm_event[PM_PID_DEATH].pm_msg = payload;
- up(&handle->pm_event[PM_PID_DEATH].sem_handle);
- break;
- }
+ pr_debug("Remote Proc received %d event\n", event);
+ handle->pm_event[event].pm_msg = payload;
+ up(&handle->pm_event[event].sem_handle);
}
+ return;
+error:
+ pr_err("Unknow event received from remote proc: %d\n", event);
}
EXPORT_SYMBOL(ipu_pm_notify_callback);
@@ -749,36 +742,14 @@ int ipu_pm_notifications(enum pm_event_type event_type, void *data)
goto error;
break;
case PM_HIBERNATE:
- pm_msg.fields.msg_type = PM_NOTIFICATIONS;
- pm_msg.fields.msg_subtype = PM_HIBERNATE;
- pm_msg.fields.parm = PM_SUCCESS;
- /* put general purpose message in share memory */
- handle->rcb_table->gp_msg = (unsigned)data;
- /* send the request to IPU*/
- retval = notify_send_event(
- params->remote_proc_id,
- params->line_id,
- params->pm_notification_event | \
- (NOTIFY_SYSTEMKEY << 16),
- (unsigned int)pm_msg.whole,
- true);
- if (retval < 0)
- goto error_send;
- /* wait until event from IPU (ipu_pm_notify_callback)*/
- retval = down_timeout
- (&handle->pm_event[PM_HIBERNATE]
- .sem_handle,
- msecs_to_jiffies(params->timeout));
- pm_msg.whole = handle->pm_event[PM_HIBERNATE].pm_msg;
- if (WARN_ON((retval < 0) ||
- (pm_msg.fields.parm != PM_SUCCESS)))
- goto error;
- else {
- /*Remote Proc is ready to hibernate*/
- pm_ack = ipu_pm_save_ctx(proc_id);
- }
+ pr_err("PM_HIBERNATE event currently not supported\n");
break;
case PM_PID_DEATH:
+ /* Just send the message to appm3 since it is the one
+ * running the resource manager.
+ */
+ if (proc_id == SYS_M3)
+ break;
pm_msg.fields.msg_type = PM_NOTIFICATIONS;
pm_msg.fields.msg_subtype = PM_PID_DEATH;
pm_msg.fields.parm = PM_SUCCESS;
@@ -809,9 +780,9 @@ int ipu_pm_notifications(enum pm_event_type event_type, void *data)
return pm_ack;
error_send:
- pr_err("Error notify_send event\n");
+ pr_err("Error notify_send event %d to proc %d\n", event_type, proc_id);
error:
- pr_err("Error sending Notification events\n");
+ pr_err("Error sending Notification event %d\n", event_type);
return -EBUSY;
}
EXPORT_SYMBOL(ipu_pm_notifications);
@@ -1461,13 +1432,14 @@ static inline int ipu_pm_get_ivaseq1(int proc_id, u32 rcb_num)
retval = ipu_pm_module_start(rcb_p->sub_type);
if (retval)
return PM_UNSUPPORTED;
+ params->pm_ivaseq1_counter++;
/*Requesting SL2*/
+ /* FIXME: sl2if should be moved to an independent function */
retval = ipu_pm_module_start(SL2_RESOURCE);
if (retval)
return PM_UNSUPPORTED;
-
- params->pm_ivaseq1_counter++;
+ params->pm_sl2if_counter++;
return PM_SUCCESS;
}
@@ -2033,9 +2005,13 @@ static inline int ipu_pm_rel_iva_hd(int proc_id, u32 rcb_num)
goto error;
/* Releasing SL2 */
- retval = ipu_pm_module_stop(SL2_RESOURCE);
- if (retval)
- return PM_UNSUPPORTED;
+ /* FIXME: sl2if should be moved to an independent function */
+ if (params->pm_sl2if_counter) {
+ retval = ipu_pm_module_stop(SL2_RESOURCE);
+ if (retval)
+ return PM_UNSUPPORTED;
+ params->pm_sl2if_counter--;
+ }
retval = ipu_pm_module_stop(rcb_p->sub_type);
if (retval)
@@ -3241,15 +3217,19 @@ int ipu_pm_save_ctx(int proc_id)
/* get the handle to proper ipu pm object */
handle = ipu_pm_get_handle(proc_id);
- if (WARN_ON(unlikely(handle == NULL)))
- return -EINVAL;
+ if (unlikely(handle == NULL))
+ return 0;
- /* Check if the M3 was loaded */
+ /* get M3's load flag */
sys_loaded = (ipu_pm_get_state(proc_id) & SYS_PROC_LOADED) >>
PROC_LD_SHIFT;
app_loaded = (ipu_pm_get_state(proc_id) & APP_PROC_LOADED) >>
PROC_LD_SHIFT;
+ /* If already down don't kill it twice */
+ if (ipu_pm_get_state(proc_id) & SYS_PROC_DOWN)
+ goto exit;
+
/* Because of the current scheme, we need to check
* if APPM3 is enable and we need to shut it down too
* Sysm3 is the only want sending the hibernate message
@@ -3276,12 +3256,20 @@ int ipu_pm_save_ctx(int proc_id)
/* Check for APPM3, if loaded reset first */
if (app_loaded) {
+ pr_info("Sleep APPM3\n");
retval = rproc_sleep(app_rproc);
+ cm_write_mod_reg(HW_AUTO,
+ OMAP4430_CM2_CORE_MOD,
+ OMAP4_CM_DUCATI_CLKSTCTRL_OFFSET);
if (retval)
goto error;
handle->rcb_table->state_flag |= APP_PROC_DOWN;
}
+ pr_info("Sleep SYSM3\n");
retval = rproc_sleep(sys_rproc);
+ cm_write_mod_reg(HW_AUTO,
+ OMAP4430_CM2_CORE_MOD,
+ OMAP4_CM_DUCATI_CLKSTCTRL_OFFSET);
if (retval)
goto error;
handle->rcb_table->state_flag |= SYS_PROC_DOWN;
@@ -3294,7 +3282,7 @@ exit:
return 0;
error:
mutex_unlock(ipu_pm_state.gate_handle);
- pr_info("Aborting hibernation process\n");
+ pr_debug("Aborting hibernation process\n");
return -EINVAL;
}
EXPORT_SYMBOL(ipu_pm_save_ctx);
@@ -3307,10 +3295,8 @@ EXPORT_SYMBOL(ipu_pm_save_ctx);
int ipu_pm_restore_ctx(int proc_id)
{
int retval = 0;
-#ifdef CONFIG_SYSLINK_DUCATI_PM
int sys_loaded;
int app_loaded;
-#endif
struct ipu_pm_object *handle;
/*If feature not supported by proc, return*/
@@ -3323,23 +3309,26 @@ int ipu_pm_restore_ctx(int proc_id)
if (WARN_ON(unlikely(handle == NULL)))
return -EINVAL;
- /* By default Ducati Hibernation is disable
- * enabling just the first time and if
- * CONFIG_SYSLINK_DUCATI_PM is defined
+ /* FIXME: This needs more analysis.
+ * Since the sync of IPU and MPU is done, this is a safe place
+ * to switch to HW_AUTO, allowing clocks to transition to gated
+ * under HW supervision.
+ */
if (first_time) {
- handle->rcb_table->state_flag |= ENABLE_IPU_HIB;
+ /* Enable/disable ipu hibernation*/
+#ifdef CONFIG_SYSLINK_DUCATI_PM
+ handle->rcb_table->pm_flags.hibernateAllowed = 1;
+#else
handle->rcb_table->pm_flags.hibernateAllowed = 0;
- handle->rcb_table->pm_flags.idleAllowed = 0;
+#endif
+ pr_info("hibernateAllowed=%d\n",
+ handle->rcb_table->pm_flags.hibernateAllowed);
first_time = 0;
- __raw_writel(HW_AUTO, cm_ducati_clkstctrl);
+ cm_write_mod_reg(HW_AUTO,
+ OMAP4430_CM2_CORE_MOD,
+ OMAP4_CM_DUCATI_CLKSTCTRL_OFFSET);
}
- /* FIXME:This will be avoided with a change in Ducati. */
- handle->rcb_table->pm_flags.idleAllowed = 1;
-
-#ifdef CONFIG_SYSLINK_DUCATI_PM
-
/* Check if the M3 was loaded */
sys_loaded = (ipu_pm_get_state(proc_id) & SYS_PROC_LOADED) >>
PROC_LD_SHIFT;
@@ -3358,12 +3347,20 @@ int ipu_pm_restore_ctx(int proc_id)
omap_mbox_restore_ctx(ducati_mbox);
iommu_restore_ctx(ducati_iommu);
+ pr_info("Wakeup SYSM3\n");
retval = rproc_wakeup(sys_rproc);
+ cm_write_mod_reg(HW_AUTO,
+ OMAP4430_CM2_CORE_MOD,
+ OMAP4_CM_DUCATI_CLKSTCTRL_OFFSET);
if (retval)
goto error;
handle->rcb_table->state_flag &= ~SYS_PROC_DOWN;
if (ipu_pm_get_state(proc_id) & APP_PROC_LOADED) {
+ pr_info("Wakeup APPM3\n");
retval = rproc_wakeup(app_rproc);
+ cm_write_mod_reg(HW_AUTO,
+ OMAP4430_CM2_CORE_MOD,
+ OMAP4_CM_DUCATI_CLKSTCTRL_OFFSET);
if (retval)
goto error;
handle->rcb_table->state_flag &= ~APP_PROC_DOWN;
@@ -3372,14 +3369,11 @@ int ipu_pm_restore_ctx(int proc_id)
goto error;
exit:
mutex_unlock(ipu_pm_state.gate_handle);
-#endif
return retval;
-#ifdef CONFIG_SYSLINK_DUCATI_PM
error:
mutex_unlock(ipu_pm_state.gate_handle);
- pr_info("Aborting restoring process\n");
+ pr_debug("Aborting restoring process\n");
return -EINVAL;
-#endif
}
EXPORT_SYMBOL(ipu_pm_restore_ctx);
@@ -3490,6 +3484,8 @@ int ipu_pm_setup(struct ipu_pm_config *cfg)
/*pm_gpt = omap_dm_timer_request_specific(GP_TIMER_3);
if (pm_gpt == NULL)
retval = -EINVAL;*/
+ /* Reset hibernation from MPU flag */
+ mpu_hib_ipu = 0;
return retval;
exit:
@@ -3645,6 +3641,9 @@ int ipu_pm_detach(u16 remote_proc_id)
goto exit;
}
+ /* Reset the state_flag */
+ handle->rcb_table->state_flag = 0;
+
/* Deleting the handle based on remote_proc_id */
ipu_pm_delete(handle);
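Review note on a structural change in ipu_pm.c: the per-event switch in ipu_pm_notify_callback collapses into a range check plus direct indexing of pm_event[], which is only sound because pm_event_type values are consecutive from PM_FIRST_EVENT; the ipu_pm.h hunk below spells that assumption out. A self-contained sketch of the validate-then-index dispatch, with demo_* names as hypothetical stand-ins:

    #include <linux/semaphore.h>
    #include <linux/types.h>

    #define DEMO_FIRST_EVENT 0
    #define DEMO_LAST_EVENT  3
    #define demo_valid(e) (DEMO_FIRST_EVENT <= (e) && (e) <= DEMO_LAST_EVENT)

    struct demo_event {
            u32 pm_msg;
            struct semaphore sem_handle;
    };

    /* validate, then index the event table directly; no switch needed */
    static void demo_dispatch(struct demo_event *events, int event,
                              u32 payload)
    {
            if (!demo_valid(event))
                    return;         /* reject out-of-range subtypes */
            events[event].pm_msg = payload;
            up(&events[event].sem_handle);  /* wake the blocked notifier */
    }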
diff --git a/drivers/dsp/syslink/ipu_pm/ipu_pm.h b/drivers/dsp/syslink/ipu_pm/ipu_pm.h
index b7e9ff46f900..c3c51d8a16a9 100644
--- a/drivers/dsp/syslink/ipu_pm/ipu_pm.h
+++ b/drivers/dsp/syslink/ipu_pm/ipu_pm.h
@@ -95,6 +95,12 @@
/* Suspend/resume/other... */
#define NUMBER_PM_EVENTS 4
+/* Processors id's */
+#define A9 3
+#define SYS_M3 2
+#define APP_M3 1
+#define TESLA 0
+
#define PM_CSTR_PERF_MASK 0x00000001
#define PM_CSTR_LAT_MASK 0x00000002
#define PM_CSTR_BW_MASK 0x00000004
@@ -287,12 +293,21 @@ enum res_type{
AUX_CLK,
};
-enum pm_event_type{PM_SUSPEND,
+/* Events can start at any number but
+ * should always be consecutive
+ */
+#define PM_FIRST_EVENT 0
+
+enum pm_event_type{PM_SUSPEND = PM_FIRST_EVENT,
PM_RESUME,
PM_PID_DEATH,
PM_HIBERNATE
};
+#define PM_LAST_EVENT ((sizeof(enum pm_event_type) / sizeof(void)) \
+ + PM_FIRST_EVENT\
+ - 1)
+
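/* Note: PM_LAST_EVENT above works only by coincidence. sizeof(enum
 * pm_event_type) is the 4-byte ABI storage size of the type, not the
 * number of enumerators, and sizeof(void) == 1 is a GCC extension;
 * the arithmetic yields 3 only because the enum happens to hold
 * exactly four events. A sentinel enumerator is a sturdier pattern
 * (hypothetical sketch, not part of this patch):
 */
enum pm_event_type_sketch {
	SKETCH_PM_SUSPEND = PM_FIRST_EVENT,
	SKETCH_PM_RESUME,
	SKETCH_PM_PID_DEATH,
	SKETCH_PM_HIBERNATE,
	SKETCH_PM_EVENT_END	/* one past the last event */
};
#define SKETCH_PM_LAST_EVENT (SKETCH_PM_EVENT_END - 1)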
struct rcb_message {
unsigned rcb_flag:1;
unsigned rcb_num:6;
@@ -366,6 +381,7 @@ struct ipu_pm_params {
int pm_iva_hd_counter;
int pm_ivaseq0_counter;
int pm_ivaseq1_counter;
+ int pm_sl2if_counter;
int pm_l3_bus_counter;
int pm_mpu_counter;
int pm_sdmachan_counter;
diff --git a/drivers/dsp/syslink/omap_notify/notify.c b/drivers/dsp/syslink/omap_notify/notify.c
index 41793b7315fa..b7f2a267b1b8 100644
--- a/drivers/dsp/syslink/omap_notify/notify.c
+++ b/drivers/dsp/syslink/omap_notify/notify.c
@@ -220,6 +220,7 @@ struct notify_object *notify_create(void *driver_handle, u16 remote_proc_id,
obj->remote_proc_id = remote_proc_id;
obj->line_id = line_id;
obj->nesting = 0;
+ mutex_init(&obj->lock);
for (i = 0; i < notify_state.cfg.num_events; i++)
INIT_LIST_HEAD(&obj->event_list[i]);
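/* The mutex_init() above is the core of this rework: locking moves
 * from the module-wide notify_state.gate_handle to one mutex per
 * notify_object, so work on different objects no longer serializes.
 * Reduced sketch of the resulting shape (field names follow this
 * file; the array size is illustrative, the real driver sizes it by
 * notify_state.cfg.num_events):
 */
struct notify_object_sketch {
	struct mutex lock;		/* guards event_list[] below */
	struct list_head event_list[32];
};

static void notify_object_sketch_init(struct notify_object_sketch *obj)
{
	int i;

	mutex_init(&obj->lock);
	for (i = 0; i < ARRAY_SIZE(obj->event_list); i++)
		INIT_LIST_HEAD(&obj->event_list[i]);
}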
@@ -335,36 +336,35 @@ int notify_register_event(u16 proc_id, u16 line_id, u32 event_id,
goto exit;
}
- if (mutex_lock_interruptible(notify_state.gate_handle) != 0)
- WARN_ON(1);
driver_handle = notify_get_driver_handle(proc_id, line_id);
if (WARN_ON(driver_handle == NULL)) {
status = NOTIFY_E_DRIVERNOTREGISTERED;
- goto exit_unlock_mutex;
+ goto exit;
}
if (WARN_ON(driver_handle->is_init != NOTIFY_DRIVERINITSTATUS_DONE)) {
status = NOTIFY_E_FAIL;
- goto exit_unlock_mutex;
+ goto exit;
}
obj = (struct notify_object *)driver_handle->notify_handle;
if (WARN_ON(obj == NULL)) {
status = NOTIFY_E_FAIL;
- goto exit_unlock_mutex;
+ goto exit;
}
listener = kmalloc(sizeof(struct notify_event_listener), GFP_KERNEL);
if (listener == NULL) {
status = NOTIFY_E_MEMORY;
- goto exit_unlock_mutex;
+ goto exit;
}
listener->callback.fn_notify_cbck = notify_callback_fxn;
listener->callback.cbck_arg = cbck_arg;
event_list = &(obj->event_list[stripped_event_id]);
list_was_empty = list_empty(event_list);
+ mutex_lock_killable(&obj->lock);
list_add_tail((struct list_head *) listener, event_list);
- mutex_unlock(notify_state.gate_handle);
+ mutex_unlock(&obj->lock);
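/* Caveat (sketch of a possible tightening, not in this patch):
 * list_was_empty is sampled before obj->lock is taken, so a
 * concurrent first registration can slip into that window and both
 * callers may try to register the driver callback. Sampling under
 * the same lock closes the window:
 *
 *	if (mutex_lock_killable(&obj->lock))
 *		goto exit;
 *	list_was_empty = list_empty(event_list);
 *	list_add_tail((struct list_head *) listener, event_list);
 *	mutex_unlock(&obj->lock);
 */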
if (list_was_empty) {
/* Registering this event for the first time. Need to
* register the callback function.
@@ -373,10 +373,7 @@ int notify_register_event(u16 proc_id, u16 line_id, u32 event_id,
event_id, _notify_exec_many,
(uint *) obj);
}
- goto exit;
-exit_unlock_mutex:
- mutex_unlock(notify_state.gate_handle);
exit:
if (status < 0) {
printk(KERN_ERR "notify_register_event failed! "
@@ -425,27 +422,25 @@ int notify_register_event_single(u16 proc_id, u16 line_id, u32 event_id,
goto exit;
}
- if (mutex_lock_interruptible(notify_state.gate_handle) != 0)
- WARN_ON(1);
driver_handle = notify_get_driver_handle(proc_id, line_id);
if (WARN_ON(driver_handle == NULL)) {
status = NOTIFY_E_DRIVERNOTREGISTERED;
- goto exit_unlock_mutex;
+ goto exit;
}
if (WARN_ON(driver_handle->is_init != NOTIFY_DRIVERINITSTATUS_DONE)) {
status = NOTIFY_E_FAIL;
- goto exit_unlock_mutex;
+ goto exit;
}
obj = (struct notify_object *)driver_handle->notify_handle;
if (WARN_ON(obj == NULL)) {
status = NOTIFY_E_FAIL;
- goto exit_unlock_mutex;
+ goto exit;
}
if (obj->callbacks[stripped_event_id].fn_notify_cbck != NULL) {
status = NOTIFY_E_ALREADYEXISTS;
- goto exit_unlock_mutex;
+ goto exit;
}
obj->callbacks[stripped_event_id].fn_notify_cbck = notify_callback_fxn;
@@ -455,9 +450,6 @@ int notify_register_event_single(u16 proc_id, u16 line_id, u32 event_id,
status = driver_handle->fxn_table.register_event(driver_handle,
stripped_event_id);
}
-
-exit_unlock_mutex:
- mutex_unlock(notify_state.gate_handle);
exit:
if (status < 0) {
printk(KERN_ERR "notify_register_event_single failed! "
@@ -510,56 +502,50 @@ int notify_unregister_event(u16 proc_id, u16 line_id, u32 event_id,
goto exit;
}
- if (mutex_lock_interruptible(notify_state.gate_handle) != 0)
- WARN_ON(1);
driver_handle = notify_get_driver_handle(proc_id, line_id);
if (WARN_ON(driver_handle == NULL)) {
status = NOTIFY_E_DRIVERNOTREGISTERED;
- goto exit_unlock_mutex;
+ goto exit;
}
if (WARN_ON(driver_handle->is_init != NOTIFY_DRIVERINITSTATUS_DONE)) {
status = NOTIFY_E_FAIL;
- goto exit_unlock_mutex;
+ goto exit;
}
obj = (struct notify_object *)driver_handle->notify_handle;
if (WARN_ON(obj == NULL)) {
status = NOTIFY_E_FAIL;
- goto exit_unlock_mutex;
+ goto exit;
}
event_list = &(obj->event_list[stripped_event_id]);
if (list_empty(event_list)) {
status = NOTIFY_E_NOTFOUND;
- goto exit_unlock_mutex;
+ goto exit;
}
+ mutex_lock_killable(&obj->lock);
list_for_each_entry(listener, event_list, element) {
 /* Skip nodes until callback and argument both match */
if ((listener->callback.fn_notify_cbck == notify_callback_fxn)
&& (listener->callback.cbck_arg == cbck_arg)) {
+ list_del((struct list_head *)listener);
found = true;
break;
}
}
if (found == false) {
status = NOTIFY_E_NOTFOUND;
- goto exit_unlock_mutex;
+ mutex_unlock(&obj->lock);
+ goto exit;
}
- /*sys_key = Gate_enterSystem();*/
- list_del((struct list_head *)listener);
- /*Gate_leaveSystem(sys_key);*/
- mutex_unlock(notify_state.gate_handle);
if (list_empty(event_list)) {
status = notify_unregister_event_single(proc_id, line_id,
event_id);
}
+ mutex_unlock(&obj->lock);
kfree(listener);
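/* Sketch of the ordering the hunk above establishes (reduced helper
 * with assumed types): unlink under obj->lock, free only after the
 * lock is dropped, and check mutex_lock_killable()'s return value
 * instead of discarding it.
 */
static int listener_remove_sketch(struct notify_object *obj,
		struct notify_event_listener *listener)
{
	if (mutex_lock_killable(&obj->lock))
		return -EINTR;	/* fatal signal while waiting */
	list_del(&listener->element);
	mutex_unlock(&obj->lock);
	kfree(listener);	/* free outside the critical section */
	return 0;
}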
- goto exit;
-
-exit_unlock_mutex:
- mutex_unlock(notify_state.gate_handle);
exit:
if (status < 0) {
printk(KERN_ERR "notify_unregister_event failed! "
@@ -603,28 +589,25 @@ int notify_unregister_event_single(u16 proc_id, u16 line_id, u32 event_id)
goto exit;
}
- status = mutex_lock_interruptible(notify_state.gate_handle);
- if (status)
- goto exit;
driver_handle = notify_get_driver_handle(proc_id, line_id);
if (WARN_ON(driver_handle == NULL)) {
status = NOTIFY_E_DRIVERNOTREGISTERED;
- goto exit_unlock_mutex;
+ goto exit;
}
if (WARN_ON(driver_handle->is_init != NOTIFY_DRIVERINITSTATUS_DONE)) {
status = NOTIFY_E_FAIL;
- goto exit_unlock_mutex;
+ goto exit;
}
obj = (struct notify_object *)driver_handle->notify_handle;
if (WARN_ON(obj == NULL)) {
status = NOTIFY_E_FAIL;
- goto exit_unlock_mutex;
+ goto exit;
}
if (obj->callbacks[stripped_event_id].fn_notify_cbck == NULL) {
status = NOTIFY_E_FAIL;
- goto exit_unlock_mutex;
+ goto exit;
}
obj->callbacks[stripped_event_id].fn_notify_cbck = NULL;
@@ -633,9 +616,6 @@ int notify_unregister_event_single(u16 proc_id, u16 line_id, u32 event_id)
status = driver_handle->fxn_table.unregister_event(
driver_handle, stripped_event_id);
}
-
-exit_unlock_mutex:
- mutex_unlock(notify_state.gate_handle);
exit:
if (status < 0) {
printk(KERN_ERR "notify_unregister_event_single failed! "
@@ -679,22 +659,20 @@ int notify_send_event(u16 proc_id, u16 line_id, u32 event_id, u32 payload,
goto exit;
}
- if (mutex_lock_interruptible(notify_state.gate_handle) != 0)
- WARN_ON(1);
driver_handle = notify_get_driver_handle(proc_id, line_id);
if (WARN_ON(driver_handle == NULL)) {
status = NOTIFY_E_DRIVERNOTREGISTERED;
- goto exit_unlock_mutex;
+ goto exit;
}
if (WARN_ON(driver_handle->is_init != NOTIFY_DRIVERINITSTATUS_DONE)) {
status = NOTIFY_E_FAIL;
- goto exit_unlock_mutex;
+ goto exit;
}
obj = (struct notify_object *)driver_handle->notify_handle;
if (WARN_ON(obj == NULL)) {
status = NOTIFY_E_FAIL;
- goto exit_unlock_mutex;
+ goto exit;
}
 /* Maybe the proc is shut down; this function will check and
@@ -703,7 +681,7 @@ int notify_send_event(u16 proc_id, u16 line_id, u32 event_id, u32 payload,
 * the ducati_clkstctrl mode */
status = ipu_pm_restore_ctx(proc_id);
if (status)
- goto exit_unlock_mutex;
+ goto exit;
if (proc_id != multiproc_self()) {
status = driver_handle->fxn_table.send_event(driver_handle,
@@ -722,19 +700,11 @@ int notify_send_event(u16 proc_id, u16 line_id, u32 event_id, u32 payload,
/* Event is disabled */
status = NOTIFY_E_EVTDISABLED;
} else {
- /* Leave critical section protection. */
- mutex_unlock(notify_state.gate_handle);
/* Execute the callback function registered to the
* event */
notify_exec(obj, event_id, payload);
- /* Enter critical section protection. TBD: nesting */
- if (mutex_lock_interruptible(notify_state.gate_handle))
- WARN_ON(1);
}
}
-
-exit_unlock_mutex:
- mutex_unlock(notify_state.gate_handle);
exit:
if (status < 0) {
printk(KERN_ERR "notify_send_event failed! status = 0x%x",
@@ -771,24 +741,23 @@ u32 notify_disable(u16 proc_id, u16 line_id)
goto exit;
}
- if (mutex_lock_interruptible(notify_state.gate_handle) != 0)
- WARN_ON(1);
driver_handle = notify_get_driver_handle(proc_id, line_id);
if (WARN_ON(driver_handle == NULL)) {
status = NOTIFY_E_DRIVERNOTREGISTERED;
- goto exit_unlock_mutex;
+ goto exit;
}
if (WARN_ON(driver_handle->is_init != NOTIFY_DRIVERINITSTATUS_DONE)) {
status = NOTIFY_E_FAIL;
- goto exit_unlock_mutex;
+ goto exit;
}
obj = (struct notify_object *)driver_handle->notify_handle;
if (WARN_ON(obj == NULL)) {
status = NOTIFY_E_FAIL;
- goto exit_unlock_mutex;
+ goto exit;
}
+ mutex_lock_killable(&obj->lock);
obj->nesting++;
if (obj->nesting == 1) {
/* Disable receiving all events */
@@ -796,9 +765,7 @@ u32 notify_disable(u16 proc_id, u16 line_id)
driver_handle->fxn_table.disable(driver_handle);
}
key = obj->nesting;
-
-exit_unlock_mutex:
- mutex_unlock(notify_state.gate_handle);
+ mutex_unlock(&obj->lock);
exit:
if (status < 0)
printk(KERN_ERR "notify_disable failed! status = 0x%x", status);
@@ -833,38 +800,35 @@ void notify_restore(u16 proc_id, u16 line_id, u32 key)
goto exit;
}
- if (mutex_lock_interruptible(notify_state.gate_handle) != 0)
- WARN_ON(1);
driver_handle = notify_get_driver_handle(proc_id, line_id);
if (WARN_ON(driver_handle == NULL)) {
status = NOTIFY_E_DRIVERNOTREGISTERED;
- goto exit_unlock_mutex;
+ goto exit;
}
if (WARN_ON(driver_handle->is_init != NOTIFY_DRIVERINITSTATUS_DONE)) {
status = NOTIFY_E_FAIL;
- goto exit_unlock_mutex;
+ goto exit;
}
obj = (struct notify_object *)driver_handle->notify_handle;
if (WARN_ON(obj == NULL)) {
status = NOTIFY_E_FAIL;
- goto exit_unlock_mutex;
+ goto exit;
}
if (key != obj->nesting) {
status = NOTIFY_E_INVALIDSTATE;
- goto exit_unlock_mutex;
+ goto exit;
}
+ mutex_lock_killable(&obj->lock);
obj->nesting--;
if (obj->nesting == 0) {
/* Enable receiving events */
if (proc_id != multiproc_self())
driver_handle->fxn_table.enable(driver_handle);
}
-
-exit_unlock_mutex:
- mutex_unlock(notify_state.gate_handle);
+ mutex_unlock(&obj->lock);
exit:
if (status < 0)
printk(KERN_ERR "notify_restore failed! status = 0x%x", status);
@@ -907,22 +871,20 @@ void notify_disable_event(u16 proc_id, u16 line_id, u32 event_id)
goto exit;
}
- if (mutex_lock_interruptible(notify_state.gate_handle) != 0)
- WARN_ON(1);
driver_handle = notify_get_driver_handle(proc_id, line_id);
if (WARN_ON(driver_handle == NULL)) {
status = NOTIFY_E_DRIVERNOTREGISTERED;
- goto exit_unlock_mutex;
+ goto exit;
}
if (WARN_ON(driver_handle->is_init != NOTIFY_DRIVERINITSTATUS_DONE)) {
status = NOTIFY_E_FAIL;
- goto exit_unlock_mutex;
+ goto exit;
}
obj = (struct notify_object *)driver_handle->notify_handle;
if (WARN_ON(obj == NULL)) {
status = NOTIFY_E_FAIL;
- goto exit_unlock_mutex;
+ goto exit;
}
if (proc_id != multiproc_self()) {
@@ -932,9 +894,6 @@ void notify_disable_event(u16 proc_id, u16 line_id, u32 event_id)
clear_bit(stripped_event_id,
(unsigned long *) &notify_state.local_enable_mask);
}
-
-exit_unlock_mutex:
- mutex_unlock(notify_state.gate_handle);
exit:
if (status < 0) {
printk(KERN_ERR "notify_disable_event failed! status = 0x%x",
@@ -979,22 +938,20 @@ void notify_enable_event(u16 proc_id, u16 line_id, u32 event_id)
goto exit;
}
- if (mutex_lock_interruptible(notify_state.gate_handle) != 0)
- WARN_ON(1);
driver_handle = notify_get_driver_handle(proc_id, line_id);
if (WARN_ON(driver_handle == NULL)) {
status = NOTIFY_E_DRIVERNOTREGISTERED;
- goto exit_unlock_mutex;
+ goto exit;
}
if (WARN_ON(driver_handle->is_init != NOTIFY_DRIVERINITSTATUS_DONE)) {
status = NOTIFY_E_FAIL;
- goto exit_unlock_mutex;
+ goto exit;
}
obj = (struct notify_object *)driver_handle->notify_handle;
if (WARN_ON(obj == NULL)) {
status = NOTIFY_E_FAIL;
- goto exit_unlock_mutex;
+ goto exit;
}
if (proc_id != multiproc_self()) {
@@ -1004,9 +961,6 @@ void notify_enable_event(u16 proc_id, u16 line_id, u32 event_id)
set_bit(stripped_event_id,
(unsigned long *)&notify_state.local_enable_mask);
}
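/* Usage sketch: the single-event variants pair the same way, but with
 * no nesting key; they mask one event id around a critical region
 * (proc_id/line_id/event_id assumed in scope):
 *
 *	notify_disable_event(proc_id, line_id, event_id);
 *	...window where this event must not be delivered...
 *	notify_enable_event(proc_id, line_id, event_id);
 */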
-
-exit_unlock_mutex:
- mutex_unlock(notify_state.gate_handle);
exit:
if (status < 0) {
printk(KERN_ERR "notify_enable_event failed! status = 0x%x",
@@ -1141,19 +1095,12 @@ static void _notify_exec_many(u16 proc_id, u16 line_id, u32 event_id, uint *arg,
event_list = &(obj->event_list[event_id]);
/* Enter critical section protection. */
- if (mutex_lock_interruptible(notify_state.gate_handle) != 0)
- WARN_ON(1);
+ mutex_lock_killable(&obj->lock);
/* Use "NULL" to get the first EventListener on the list */
list_for_each_entry(listener, event_list, element) {
- /* Leave critical section protection. */
- mutex_unlock(notify_state.gate_handle);
listener->callback.fn_notify_cbck(proc_id, line_id, event_id,
listener->callback.cbck_arg, payload);
- /* Enter critical section protection. */
- if (mutex_lock_interruptible(notify_state.gate_handle) != 0)
- WARN_ON(1);
}
-
/* Leave critical section protection. */
- mutex_unlock(notify_state.gate_handle);
+ mutex_unlock(&obj->lock);
}
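/* Two properties of this last hunk are worth noting. Callbacks now run
 * with obj->lock held (the old code dropped the global gate around
 * each callback), so a callback that re-enters these APIs for the same
 * notify_object would deadlock on the non-recursive mutex. Also,
 * mutex_lock_killable() is __must_check; a checked variant of the walk
 * would look like this sketch (reduced context, not part of this
 * patch):
 */
static void notify_exec_many_sketch(struct notify_object *obj,
		u16 proc_id, u16 line_id, u32 event_id, u32 payload)
{
	struct notify_event_listener *listener;
	struct list_head *event_list = &obj->event_list[event_id];

	if (mutex_lock_killable(&obj->lock))
		return;		/* interrupted; skip delivery */
	list_for_each_entry(listener, event_list, element)
		listener->callback.fn_notify_cbck(proc_id, line_id,
				event_id, listener->callback.cbck_arg,
				payload);
	mutex_unlock(&obj->lock);
}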