-rw-r--r--  Documentation/gpu/drm-uapi.rst            | 12
-rw-r--r--  drivers/dma-buf/udmabuf.c                 | 28
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c           | 30
-rw-r--r--  drivers/gpu/drm/drm_fourcc.c              |  4
-rw-r--r--  drivers/gpu/drm/drm_gem.c                 | 19
-rw-r--r--  drivers/gpu/drm/drm_gem_shmem_helper.c    |  4
-rw-r--r--  drivers/gpu/drm/drm_mode_config.c         |  8
-rw-r--r--  drivers/gpu/drm/scheduler/sched_entity.c  |  2
-rw-r--r--  drivers/gpu/drm/scheduler/sched_main.c    |  4
-rw-r--r--  drivers/video/fbdev/core/fb_defio.c       | 16
-rw-r--r--  include/drm/drm_fb_helper.h               |  2
-rw-r--r--  include/drm/drm_gem.h                     |  1
-rw-r--r--  include/linux/fb.h                        |  3
13 files changed, 84 insertions(+), 49 deletions(-)
diff --git a/Documentation/gpu/drm-uapi.rst b/Documentation/gpu/drm-uapi.rst
index ce47b4292481..65fb3036a580 100644
--- a/Documentation/gpu/drm-uapi.rst
+++ b/Documentation/gpu/drm-uapi.rst
@@ -402,19 +402,19 @@ It's possible to run the IGT-tests in a VM in two ways:
1. Use IGT inside a VM
2. Use IGT from the host machine and write the results in a shared directory.
-As follow, there is an example of using a VM with a shared directory with
-the host machine to run igt-tests. As an example it's used virtme::
+Following is an example of using a VM with a shared directory with
+the host machine to run igt-tests. This example uses virtme::
$ virtme-run --rwdir /path/for/shared_dir --kdir=path/for/kernel/directory --mods=auto
-Run the igt-tests in the guest machine, as example it's ran the 'kms_flip'
+Run the igt-tests in the guest machine. This example runs the 'kms_flip'
tests::
$ /path/for/igt-gpu-tools/scripts/run-tests.sh -p -s -t "kms_flip.*" -v
-In this example, instead of build the igt_runner, Piglit is used
-(-p option); it's created html summary of the tests results and it's saved
-in the folder "igt-gpu-tools/results"; it's executed only the igt-tests
+In this example, instead of building the igt_runner, Piglit is used
+(-p option). It creates an HTML summary of the test results and saves
+them in the folder "igt-gpu-tools/results". It executes only the igt-tests
matching the -t option.
Display CRC Support
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index 283816fbd72f..740d6e426ee9 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -13,6 +13,8 @@
#include <linux/slab.h>
#include <linux/udmabuf.h>
#include <linux/hugetlb.h>
+#include <linux/vmalloc.h>
+#include <linux/iosys-map.h>
static int list_limit = 1024;
module_param(list_limit, int, 0644);
@@ -60,6 +62,30 @@ static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
return 0;
}
+static int vmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
+{
+ struct udmabuf *ubuf = buf->priv;
+ void *vaddr;
+
+ dma_resv_assert_held(buf->resv);
+
+ vaddr = vm_map_ram(ubuf->pages, ubuf->pagecount, -1);
+ if (!vaddr)
+ return -EINVAL;
+
+ iosys_map_set_vaddr(map, vaddr);
+ return 0;
+}
+
+static void vunmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
+{
+ struct udmabuf *ubuf = buf->priv;
+
+ dma_resv_assert_held(buf->resv);
+
+ vm_unmap_ram(map->vaddr, ubuf->pagecount);
+}
+
static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
enum dma_data_direction direction)
{
@@ -162,6 +188,8 @@ static const struct dma_buf_ops udmabuf_ops = {
.unmap_dma_buf = unmap_udmabuf,
.release = release_udmabuf,
.mmap = mmap_udmabuf,
+ .vmap = vmap_udmabuf,
+ .vunmap = vunmap_udmabuf,
.begin_cpu_access = begin_cpu_udmabuf,
.end_cpu_access = end_cpu_udmabuf,
};
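
The new vmap/vunmap hooks let dma-buf importers obtain a kernel virtual mapping of a udmabuf through the generic dma_buf_vmap() interface. A minimal importer-side sketch, not part of this patch: example_copy_from_udmabuf() is a hypothetical helper and assumes the caller already holds a reference to the dma-buf (e.g. from dma_buf_get()). The reservation lock is taken around the map/unmap calls, matching the dma_resv_assert_held() checks in the exporter above::

    #include <linux/dma-buf.h>
    #include <linux/dma-resv.h>
    #include <linux/iosys-map.h>
    #include <linux/minmax.h>
    #include <linux/string.h>

    /* Hypothetical importer helper, for illustration only. */
    static int example_copy_from_udmabuf(struct dma_buf *buf, void *dst, size_t len)
    {
            struct iosys_map map;
            int ret;

            dma_resv_lock(buf->resv, NULL);

            ret = dma_buf_vmap(buf, &map);
            if (ret)
                    goto out_unlock;

            /* udmabuf maps system memory, so map.vaddr is a plain kernel pointer. */
            memcpy(dst, map.vaddr, min_t(size_t, len, buf->size));

            dma_buf_vunmap(buf, &map);

    out_unlock:
            dma_resv_unlock(buf->resv);
            return ret;
    }
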
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index a1f86e436ae8..b3a731b9170a 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -403,6 +403,13 @@ err:
spin_unlock_irqrestore(&helper->damage_lock, flags);
}
+static void drm_fb_helper_damage_work(struct work_struct *work)
+{
+ struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper, damage_work);
+
+ drm_fb_helper_fb_dirty(helper);
+}
+
/**
* drm_fb_helper_prepare - setup a drm_fb_helper structure
* @dev: DRM device
@@ -418,6 +425,7 @@ void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
INIT_LIST_HEAD(&helper->kernel_fb_list);
spin_lock_init(&helper->damage_lock);
INIT_WORK(&helper->resume_work, drm_fb_helper_resume_worker);
+ INIT_WORK(&helper->damage_work, drm_fb_helper_damage_work);
helper->damage_clip.x1 = helper->damage_clip.y1 = ~0;
mutex_init(&helper->lock);
helper->funcs = funcs;
@@ -549,6 +557,7 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
return;
cancel_work_sync(&fb_helper->resume_work);
+ cancel_work_sync(&fb_helper->damage_work);
info = fb_helper->info;
if (info) {
@@ -590,16 +599,9 @@ static void drm_fb_helper_add_damage_clip(struct drm_fb_helper *helper, u32 x, u
static void drm_fb_helper_damage(struct drm_fb_helper *helper, u32 x, u32 y,
u32 width, u32 height)
{
- struct fb_info *info = helper->info;
-
drm_fb_helper_add_damage_clip(helper, x, y, width, height);
- /*
- * The current fbdev emulation only flushes buffers if a damage
- * update is necessary. And we can assume that deferred I/O has
- * been enabled as damage updates require deferred I/O for mmap.
- */
- fb_deferred_io_schedule_flush(info);
+ schedule_work(&helper->damage_work);
}
/*
@@ -664,16 +666,10 @@ void drm_fb_helper_deferred_io(struct fb_info *info, struct list_head *pagerefli
if (min_off < max_off) {
drm_fb_helper_memory_range_to_clip(info, min_off, max_off - min_off, &damage_area);
- drm_fb_helper_add_damage_clip(helper, damage_area.x1, damage_area.y1,
- drm_rect_width(&damage_area),
- drm_rect_height(&damage_area));
+ drm_fb_helper_damage(helper, damage_area.x1, damage_area.y1,
+ drm_rect_width(&damage_area),
+ drm_rect_height(&damage_area));
}
-
- /*
- * Flushes all dirty pages from mmap's pageref list and the
- * areas that have been written by struct fb_ops callbacks.
- */
- drm_fb_helper_fb_dirty(helper);
}
EXPORT_SYMBOL(drm_fb_helper_deferred_io);
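
With damage now funnelled through the dedicated damage_work, drm_fb_helper_fb_dirty() runs from the worker and ultimately calls the driver's struct drm_fb_helper_funcs.fb_dirty hook with the accumulated clip rectangle. A rough sketch of such a hook, assuming a driver built on the fbdev helpers; example_fb_dirty() and example_fb_helper_funcs are made-up names, and the actual flush is driver specific::

    #include <drm/drm_fb_helper.h>
    #include <drm/drm_print.h>

    /* Hypothetical .fb_dirty implementation, for illustration only. */
    static int example_fb_dirty(struct drm_fb_helper *helper,
                                struct drm_clip_rect *clip)
    {
            drm_dbg(helper->dev, "flush clip [%d, %d, %d, %d]\n",
                    clip->x1, clip->y1, clip->x2, clip->y2);

            /* Driver-specific upload of the damaged region would go here. */

            return 0;
    }

    static const struct drm_fb_helper_funcs example_fb_helper_funcs = {
            .fb_dirty = example_fb_dirty,
    };
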
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index 6242dfbe9240..0f17dfa8702b 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -190,6 +190,10 @@ const struct drm_format_info *__drm_format_info(u32 format)
{ .format = DRM_FORMAT_BGRA5551, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true },
{ .format = DRM_FORMAT_RGB565, .depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_BGR565, .depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+#ifdef __BIG_ENDIAN
+ { .format = DRM_FORMAT_XRGB1555 | DRM_FORMAT_BIG_ENDIAN, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_RGB565 | DRM_FORMAT_BIG_ENDIAN, .depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+#endif
{ .format = DRM_FORMAT_RGB888, .depth = 24, .num_planes = 1, .cpp = { 3, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_BGR888, .depth = 24, .num_planes = 1, .cpp = { 3, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
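
On __BIG_ENDIAN kernels the format table now carries entries for the byte-swapped XRGB1555 and RGB565 fourccs, so a lookup through drm_format_info() finds a valid descriptor for them instead of coming back empty. A small illustrative check; example_format_supported() is a hypothetical helper::

    #include <drm/drm_fourcc.h>

    /* Hypothetical helper, for illustration only. */
    static bool example_format_supported(u32 fourcc)
    {
            const struct drm_format_info *info = drm_format_info(fourcc);

            /* Accept single-plane, 16-bit-per-pixel formats. */
            return info && info->num_planes == 1 && info->cpp[0] == 2;
    }

    /* e.g. example_format_supported(DRM_FORMAT_RGB565 | DRM_FORMAT_BIG_ENDIAN) */
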
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index b8db675e7fb5..59a0bb5ebd85 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -170,6 +170,20 @@ void drm_gem_private_object_init(struct drm_device *dev,
EXPORT_SYMBOL(drm_gem_private_object_init);
/**
+ * drm_gem_private_object_fini - Finalize a failed drm_gem_object
+ * @obj: drm_gem_object
+ *
+ * Uninitialize an already allocated GEM object when its initialization failed
+ */
+void drm_gem_private_object_fini(struct drm_gem_object *obj)
+{
+ WARN_ON(obj->dma_buf);
+
+ dma_resv_fini(&obj->_resv);
+}
+EXPORT_SYMBOL(drm_gem_private_object_fini);
+
+/**
* drm_gem_object_handle_free - release resources bound to userspace handles
* @obj: GEM object to clean up.
*
@@ -930,12 +944,11 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
void
drm_gem_object_release(struct drm_gem_object *obj)
{
- WARN_ON(obj->dma_buf);
-
if (obj->filp)
fput(obj->filp);
- dma_resv_fini(&obj->_resv);
+ drm_gem_private_object_fini(obj);
+
drm_gem_free_mmap_offset(obj);
drm_gem_lru_remove(obj);
}
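
drm_gem_private_object_fini() gives drivers a counterpart to drm_gem_private_object_init() for error paths where the object never becomes visible, which is exactly how the shmem helper below uses it. A hedged sketch of such an error path; struct example_bo and example_bo_create() are hypothetical::

    #include <linux/err.h>
    #include <linux/slab.h>
    #include <linux/vmalloc.h>
    #include <drm/drm_gem.h>

    struct example_bo {
            struct drm_gem_object base;
            void *backing;
    };

    /* Hypothetical constructor, for illustration only. */
    static struct example_bo *example_bo_create(struct drm_device *dev, size_t size)
    {
            struct example_bo *bo;

            bo = kzalloc(sizeof(*bo), GFP_KERNEL);
            if (!bo)
                    return ERR_PTR(-ENOMEM);

            drm_gem_private_object_init(dev, &bo->base, size);

            bo->backing = vzalloc(size);
            if (!bo->backing) {
                    /* Undo the init; the object was never exposed. */
                    drm_gem_private_object_fini(&bo->base);
                    kfree(bo);
                    return ERR_PTR(-ENOMEM);
            }

            return bo;
    }
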
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 35138f8a375c..db73234edcbe 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -79,8 +79,10 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
} else {
ret = drm_gem_object_init(dev, obj, size);
}
- if (ret)
+ if (ret) {
+ drm_gem_private_object_fini(obj);
goto err_free;
+ }
ret = drm_gem_create_mmap_offset(obj);
if (ret)
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c
index 688c8afe0bf1..8525ef851540 100644
--- a/drivers/gpu/drm/drm_mode_config.c
+++ b/drivers/gpu/drm/drm_mode_config.c
@@ -399,6 +399,8 @@ static void drm_mode_config_init_release(struct drm_device *dev, void *ptr)
*/
int drmm_mode_config_init(struct drm_device *dev)
{
+ int ret;
+
mutex_init(&dev->mode_config.mutex);
drm_modeset_lock_init(&dev->mode_config.connection_mutex);
mutex_init(&dev->mode_config.idr_mutex);
@@ -420,7 +422,11 @@ int drmm_mode_config_init(struct drm_device *dev)
init_llist_head(&dev->mode_config.connector_free_list);
INIT_WORK(&dev->mode_config.connector_free_work, drm_connector_free_work_fn);
- drm_mode_create_standard_properties(dev);
+ ret = drm_mode_create_standard_properties(dev);
+ if (ret) {
+ drm_mode_config_cleanup(dev);
+ return ret;
+ }
/* Just to be sure */
dev->mode_config.num_fb = 0;
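
Since drmm_mode_config_init() can now fail while creating the standard properties, callers are expected to check its return value. A minimal sketch of a driver's modeset init under that assumption; example_modeset_init() is a hypothetical function and the mode limits are arbitrary::

    #include <drm/drm_device.h>
    #include <drm/drm_mode_config.h>

    /* Hypothetical init path, for illustration only. */
    static int example_modeset_init(struct drm_device *drm)
    {
            int ret;

            ret = drmm_mode_config_init(drm);
            if (ret)
                    return ret;

            drm->mode_config.min_width = 0;
            drm->mode_config.min_height = 0;
            drm->mode_config.max_width = 4096;
            drm->mode_config.max_height = 4096;

            return 0;
    }
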
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index fe09e5be79bd..15d04a0ec623 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -81,7 +81,7 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
init_completion(&entity->entity_idle);
/* We start in an idle state. */
- complete(&entity->entity_idle);
+ complete_all(&entity->entity_idle);
spin_lock_init(&entity->rq_lock);
spsc_queue_init(&entity->job_queue);
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 31f3a1267be4..fd22d753b4ed 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -987,7 +987,7 @@ static int drm_sched_main(void *param)
sched_job = drm_sched_entity_pop_job(entity);
if (!sched_job) {
- complete(&entity->entity_idle);
+ complete_all(&entity->entity_idle);
continue;
}
@@ -998,7 +998,7 @@ static int drm_sched_main(void *param)
trace_drm_run_job(sched_job, entity);
fence = sched->ops->run_job(sched_job);
- complete(&entity->entity_idle);
+ complete_all(&entity->entity_idle);
drm_sched_fence_scheduled(s_fence);
if (!IS_ERR_OR_NULL(fence)) {
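
The switch from complete() to complete_all() matters because entity_idle may be waited on more than once: complete() releases only a single wait_for_completion() and bumps the done counter by one, while complete_all() leaves the completion signalled for every current and future waiter until it is re-armed. A generic illustration on a plain struct completion, unrelated to the scheduler code itself; the example_* names are made up::

    #include <linux/completion.h>

    static DECLARE_COMPLETION(example_idle);

    static void example_wait(void)
    {
            /* Returns once the completion has been signalled. */
            wait_for_completion(&example_idle);
    }

    static void example_mark_idle(void)
    {
            /* Releases all current waiters and lets later waits pass too. */
            complete_all(&example_idle);
    }

    static void example_mark_busy(void)
    {
            /* Re-arm: subsequent example_wait() calls block again. */
            reinit_completion(&example_idle);
    }
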
diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
index dec678f72a42..c730253ab85c 100644
--- a/drivers/video/fbdev/core/fb_defio.c
+++ b/drivers/video/fbdev/core/fb_defio.c
@@ -332,19 +332,3 @@ void fb_deferred_io_cleanup(struct fb_info *info)
mutex_destroy(&fbdefio->lock);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
-
-void fb_deferred_io_schedule_flush(struct fb_info *info)
-{
- struct fb_deferred_io *fbdefio = info->fbdefio;
-
- if (WARN_ON_ONCE(!fbdefio))
- return; /* bug in driver logic */
-
- /*
- * There's no requirement from callers to schedule the
- * flush immediately. Rather schedule the worker with a
- * delay and let a few more writes pile up.
- */
- schedule_delayed_work(&info->deferred_work, fbdefio->delay);
-}
-EXPORT_SYMBOL_GPL(fb_deferred_io_schedule_flush);
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 455f6c2b8117..b111dc7ada78 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -116,6 +116,7 @@ struct drm_fb_helper_funcs {
* @damage_clip: clip rectangle used with deferred_io to accumulate damage to
* the screen buffer
* @damage_lock: spinlock protecting @damage_clip
+ * @damage_work: worker used to flush the framebuffer
* @resume_work: worker used during resume if the console lock is already taken
*
* This is the main structure used by the fbdev helpers. Drivers supporting
@@ -145,6 +146,7 @@ struct drm_fb_helper {
u32 pseudo_palette[17];
struct drm_clip_rect damage_clip;
spinlock_t damage_lock;
+ struct work_struct damage_work;
struct work_struct resume_work;
/**
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index a17c2f903f81..772a4adf5287 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -405,6 +405,7 @@ int drm_gem_object_init(struct drm_device *dev,
struct drm_gem_object *obj, size_t size);
void drm_gem_private_object_init(struct drm_device *dev,
struct drm_gem_object *obj, size_t size);
+void drm_gem_private_object_fini(struct drm_gem_object *obj);
void drm_gem_vm_open(struct vm_area_struct *vma);
void drm_gem_vm_close(struct vm_area_struct *vma);
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 3a822e4357b1..96b96323e9cb 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -663,7 +663,6 @@ extern void fb_deferred_io_open(struct fb_info *info,
struct inode *inode,
struct file *file);
extern void fb_deferred_io_cleanup(struct fb_info *info);
-extern void fb_deferred_io_schedule_flush(struct fb_info *info);
extern int fb_deferred_io_fsync(struct file *file, loff_t start,
loff_t end, int datasync);
@@ -807,7 +806,7 @@ extern int fb_find_mode(struct fb_var_screeninfo *var,
#if defined(CONFIG_VIDEO_NOMODESET)
bool fb_modesetting_disabled(const char *drvname);
#else
-bool fb_modesetting_disabled(const char *drvname)
+static inline bool fb_modesetting_disabled(const char *drvname)
{
return false;
}