author    Jean-Sébastien Pédron <dumbbell@FreeBSD.org>  2015-10-31 15:09:31 +0000
committer Jean-Sébastien Pédron <dumbbell@FreeBSD.org>  2015-10-31 15:09:31 +0000
commit    25a984748cf92f173499fac132bdcb42a52bc264 (patch)
tree      c52f4ae05a9aaf7ffe8354c89538b3fe2e9651a4 /sys/dev/drm2/i915/i915_gem_execbuffer.c
parent    2dac22dcf38447afb0331b8fedb96edf305027fa (diff)
drm/i915: Reduce diff with Linux 3.8

There is no functional change. The goal is to ease the future update to
Linux 3.8's i915 driver.

MFC after:	2 months
Notes:
    svn path=/head/; revision=290228
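
For context, the bulk of this patch is mechanical style conversion: FreeBSD's
parenthesized return values become plain Linux-style returns, and the local
pre_struct_lock_err goto label is renamed to the pre_mutex_err label used by
the Linux driver. A minimal sketch of the return-style change (illustrative
only; the function names are made up, not code from the tree):

    /* Illustrative sketch, not code from the tree. */
    static int
    example_before(int v)
    {
            return (v);     /* FreeBSD style(9): parenthesized return value */
    }

    static int
    example_after(int v)
    {
            return v;       /* Linux style, adopted by this commit */
    }
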
Diffstat (limited to 'sys/dev/drm2/i915/i915_gem_execbuffer.c')
-rw-r--r--  sys/dev/drm2/i915/i915_gem_execbuffer.c | 92
1 file changed, 48 insertions(+), 44 deletions(-)
diff --git a/sys/dev/drm2/i915/i915_gem_execbuffer.c b/sys/dev/drm2/i915/i915_gem_execbuffer.c
index 818efbf144ff..61d0fa15eaa7 100644
--- a/sys/dev/drm2/i915/i915_gem_execbuffer.c
+++ b/sys/dev/drm2/i915/i915_gem_execbuffer.c
@@ -221,9 +221,10 @@ eb_create(int size)
{
struct eb_objects *eb;
- eb = malloc(sizeof(*eb), DRM_I915_GEM, M_WAITOK | M_ZERO);
+ eb = malloc(sizeof(*eb),
+ DRM_I915_GEM, M_WAITOK | M_ZERO);
eb->buckets = hashinit(size, DRM_I915_GEM, &eb->hashmask);
- return (eb);
+ return eb;
}
static void
@@ -250,9 +251,10 @@ eb_get_object(struct eb_objects *eb, unsigned long handle)
LIST_FOREACH(obj, &eb->buckets[handle & eb->hashmask], exec_node) {
if (obj->exec_handle == handle)
- return (obj);
+ return obj;
}
- return (NULL);
+
+ return NULL;
}
static void
@@ -374,7 +376,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
/* We can't wait for rendering with pagefaults disabled */
if (obj->active && (curthread->td_pflags & TDP_NOFAULTING) != 0)
- return (-EFAULT);
+ return -EFAULT;
reloc->delta += target_offset;
if (use_cpu_reloc(obj)) {
@@ -389,7 +391,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
sf = sf_buf_alloc(obj->pages[OFF_TO_IDX(reloc->offset)],
SFB_NOWAIT);
if (sf == NULL)
- return (-ENOMEM);
+ return -ENOMEM;
vaddr = (void *)sf_buf_kva(sf);
*(uint32_t *)(vaddr + page_offset) = reloc->delta;
sf_buf_free(sf);
@@ -509,14 +511,13 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
i915_gem_retire_requests(dev);
ret = 0;
- pflags = vm_fault_disable_pagefaults();
/* This is the fast path and we cannot handle a pagefault whilst
* holding the device lock lest the user pass in the relocations
* contained within a mmaped bo. For in such a case we, the page
* fault handler would call i915_gem_fault() and we would try to
* acquire the device lock again. Obviously this is bad.
*/
-
+ pflags = vm_fault_disable_pagefaults();
list_for_each_entry(obj, objects, exec_list) {
ret = i915_gem_execbuffer_relocate_object(obj, eb);
if (ret)
@@ -585,7 +586,8 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
struct drm_i915_gem_object *obj;
struct list_head ordered_objects;
bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
- int ret, retry;
+ int retry;
+ int ret;
dev_priv = ring->dev->dev_private;
INIT_LIST_HEAD(&ordered_objects);
@@ -957,11 +959,12 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec, int count,
return -EINVAL;
length = exec[i].relocation_count *
- sizeof(struct drm_i915_gem_relocation_entry);
+ sizeof(struct drm_i915_gem_relocation_entry);
if (length == 0) {
(*map)[i] = NULL;
continue;
}
+
/*
* Since both start and end of the relocation region
* may be not aligned on the page boundary, be
@@ -977,7 +980,7 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec, int count,
if ((*maplen)[i] == -1) {
free(ma, DRM_I915_GEM);
(*map)[i] = NULL;
- return (-EFAULT);
+ return -EFAULT;
}
}
@@ -1058,7 +1061,7 @@ i915_gem_fix_mi_batchbuffer_end(struct drm_i915_gem_object *batch_obj,
char *mkva;
uint64_t po_r, po_w;
uint32_t cmd;
-
+
po_r = batch_obj->base.dev->agp->base + batch_obj->gtt_offset +
batch_start_offset + batch_len;
if (batch_len > 0)
@@ -1088,7 +1091,7 @@ DRM_DEBUG("batchbuffer does not end by MI_BATCH_BUFFER_END, overwriting last bo
int i915_fix_mi_batchbuffer_end = 0;
- static int
+static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
@@ -1125,13 +1128,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_i915_gem_object *batch_obj;
struct drm_clip_rect *cliprects = NULL;
struct intel_ring_buffer *ring;
- vm_page_t **relocs_ma;
- int *relocs_len;
u32 ctx_id = i915_execbuffer2_get_context_id(*args);
u32 exec_start, exec_len;
u32 seqno;
u32 mask;
int ret, mode, i;
+ vm_page_t **relocs_ma;
+ int *relocs_len;
if (!i915_gem_check_execbuffer(args)) {
DRM_DEBUG("execbuf with invalid offset/length\n");
@@ -1141,10 +1144,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (args->batch_len == 0)
return (0);
- ret = validate_exec_list(exec, args->buffer_count, &relocs_ma,
- &relocs_len);
- if (ret != 0)
- goto pre_struct_lock_err;
+ ret = validate_exec_list(exec, args->buffer_count,
+ &relocs_ma, &relocs_len);
+ if (ret)
+ goto pre_mutex_err;
switch (args->flags & I915_EXEC_RING_MASK) {
case I915_EXEC_DEFAULT:
@@ -1157,7 +1160,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
DRM_DEBUG("Ring %s doesn't support contexts\n",
ring->name);
ret = -EPERM;
- goto pre_struct_lock_err;
+ goto pre_mutex_err;
}
break;
case I915_EXEC_BLT:
@@ -1166,20 +1169,20 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
DRM_DEBUG("Ring %s doesn't support contexts\n",
ring->name);
ret = -EPERM;
- goto pre_struct_lock_err;
+ goto pre_mutex_err;
}
break;
default:
DRM_DEBUG("execbuf with unknown ring: %d\n",
(int)(args->flags & I915_EXEC_RING_MASK));
ret = -EINVAL;
- goto pre_struct_lock_err;
+ goto pre_mutex_err;
}
if (!intel_ring_initialized(ring)) {
DRM_DEBUG("execbuf with invalid ring: %d\n",
(int)(args->flags & I915_EXEC_RING_MASK));
ret = -EINVAL;
- goto pre_struct_lock_err;
+ goto pre_mutex_err;
}
mode = args->flags & I915_EXEC_CONSTANTS_MASK;
@@ -1192,13 +1195,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
mode != dev_priv->relative_constants_mode) {
if (INTEL_INFO(dev)->gen < 4) {
ret = -EINVAL;
- goto pre_struct_lock_err;
+ goto pre_mutex_err;
}
if (INTEL_INFO(dev)->gen > 5 &&
mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
ret = -EINVAL;
- goto pre_struct_lock_err;
+ goto pre_mutex_err;
}
/* The HW changed the meaning on this bit on gen6 */
@@ -1209,57 +1212,57 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
default:
DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
ret = -EINVAL;
- goto pre_struct_lock_err;
+ goto pre_mutex_err;
}
if (args->buffer_count < 1) {
DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
ret = -EINVAL;
- goto pre_struct_lock_err;
+ goto pre_mutex_err;
}
if (args->num_cliprects != 0) {
if (ring != &dev_priv->rings[RCS]) {
DRM_DEBUG("clip rectangles are only valid with the render ring\n");
ret = -EINVAL;
- goto pre_struct_lock_err;
+ goto pre_mutex_err;
}
if (INTEL_INFO(dev)->gen >= 5) {
DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
ret = -EINVAL;
- goto pre_struct_lock_err;
+ goto pre_mutex_err;
}
if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
DRM_DEBUG("execbuf with %u cliprects\n",
args->num_cliprects);
ret = -EINVAL;
- goto pre_struct_lock_err;
+ goto pre_mutex_err;
}
- cliprects = malloc( sizeof(*cliprects) * args->num_cliprects,
- DRM_I915_GEM, M_WAITOK | M_ZERO);
+ cliprects = malloc(args->num_cliprects * sizeof(*cliprects),
+ DRM_I915_GEM, M_WAITOK | M_ZERO);
ret = -copyin((void *)(uintptr_t)args->cliprects_ptr, cliprects,
sizeof(*cliprects) * args->num_cliprects);
if (ret != 0)
- goto pre_struct_lock_err;
+ goto pre_mutex_err;
}
ret = i915_mutex_lock_interruptible(dev);
if (ret)
- goto pre_struct_lock_err;
+ goto pre_mutex_err;
if (dev_priv->mm.suspended) {
DRM_UNLOCK(dev);
ret = -EBUSY;
- goto pre_struct_lock_err;
+ goto pre_mutex_err;
}
eb = eb_create(args->buffer_count);
if (eb == NULL) {
DRM_UNLOCK(dev);
ret = -ENOMEM;
- goto pre_struct_lock_err;
+ goto pre_mutex_err;
}
/* Look up object handles */
@@ -1350,7 +1353,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
mode != dev_priv->relative_constants_mode) {
ret = intel_ring_begin(ring, 4);
if (ret)
- goto err;
+ goto err;
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
@@ -1375,9 +1378,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
args->batch_start_offset, args->batch_len);
}
- CTR4(KTR_DRM, "ring_dispatch %s %d exec %x %x", ring->name, seqno,
- exec_start, exec_len);
-
if (cliprects) {
for (i = 0; i < args->num_cliprects; i++) {
ret = i915_emit_box(dev, &cliprects[i],
@@ -1397,6 +1397,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err;
}
+ CTR4(KTR_DRM, "ring_dispatch %s %d exec %x %x", ring->name, seqno,
+ exec_start, exec_len);
+
i915_gem_execbuffer_move_to_active(&objects, ring, seqno);
i915_gem_execbuffer_retire_commands(dev, file, ring);
@@ -1411,9 +1414,10 @@ err:
list_del_init(&obj->exec_list);
drm_gem_object_unreference(&obj->base);
}
+
DRM_UNLOCK(dev);
-pre_struct_lock_err:
+pre_mutex_err:
for (i = 0; i < args->buffer_count; i++) {
if (relocs_ma[i] != NULL) {
vm_page_unhold_pages(relocs_ma[i], relocs_len[i]);
@@ -1461,7 +1465,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
args->buffer_count, ret);
free(exec_list, DRM_I915_GEM);
free(exec2_list, DRM_I915_GEM);
- return (ret);
+ return ret;
}
for (i = 0; i < args->buffer_count; i++) {
@@ -1525,8 +1529,8 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
}
/* XXXKIB user-controllable malloc size */
- exec2_list = malloc(sizeof(*exec2_list) * args->buffer_count,
- DRM_I915_GEM, M_WAITOK);
+ exec2_list = malloc(sizeof(*exec2_list)*args->buffer_count,
+ DRM_I915_GEM, M_WAITOK);
ret = -copyin((void *)(uintptr_t)args->buffers_ptr, exec2_list,
sizeof(*exec2_list) * args->buffer_count);
if (ret != 0) {