aboutsummaryrefslogtreecommitdiff
path: root/sys/dev/drm2
diff options
context:
space:
mode:
authorJohn Baldwin <jhb@FreeBSD.org>2015-10-21 20:49:45 +0000
committerJohn Baldwin <jhb@FreeBSD.org>2015-10-21 20:49:45 +0000
commitd836c868a809e711b608beb64140d3c6791f0066 (patch)
treecad4427d0a1fe46b3ceb556d9d229779ca3038f8 /sys/dev/drm2
parent40a02d00a5d5e2fd8d4c326c63e4b4bc407d5102 (diff)
downloadsrc-d836c868a809e711b608beb64140d3c6791f0066.tar.gz
src-d836c868a809e711b608beb64140d3c6791f0066.zip
i915_gem_do_execbuffer() holds the pages backing each relocation region for
various reasons while executing user commands. After these commands are completed, the pages backing the relocation regions are released. Since relocation regions do not have to be page aligned, the code in validate_exec_list() allocates 2 extra page pointers in the array of held pages populated by vm_fault_quick_hold_pages(). However, the cleanup code that unheld the pages always assumed that only the buffer size / PAGE_SIZE pages were used. This meant that non-page aligned buffers would not unhold the last 1 or 2 pages in the list. Fix this by saving the number of held pages returned by vm_fault_quick_hold_pages() for each relocation region and using this count during cleanup. Reviewed by: dumbbell, kib MFC after: 1 week Differential Revision: https://reviews.freebsd.org/D3965
Notes
Notes: svn path=/head/; revision=289719
Diffstat (limited to 'sys/dev/drm2')
-rw-r--r--sys/dev/drm2/i915/i915_gem_execbuffer.c21
1 file changed, 12 insertions, 9 deletions
diff --git a/sys/dev/drm2/i915/i915_gem_execbuffer.c b/sys/dev/drm2/i915/i915_gem_execbuffer.c
index 088043d1a5c8..818efbf144ff 100644
--- a/sys/dev/drm2/i915/i915_gem_execbuffer.c
+++ b/sys/dev/drm2/i915/i915_gem_execbuffer.c
@@ -941,13 +941,15 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
static int
validate_exec_list(struct drm_i915_gem_exec_object2 *exec, int count,
- vm_page_t ***map)
+ vm_page_t ***map, int **maplen)
{
vm_page_t *ma;
int i, length, page_count;
/* XXXKIB various limits checking is missing there */
*map = malloc(count * sizeof(*ma), DRM_I915_GEM, M_WAITOK | M_ZERO);
+ *maplen = malloc(count * sizeof(*maplen), DRM_I915_GEM, M_WAITOK |
+ M_ZERO);
for (i = 0; i < count; i++) {
/* First check for malicious input causing overflow */
if (exec[i].relocation_count >
@@ -969,9 +971,10 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec, int count,
page_count = howmany(length, PAGE_SIZE) + 2;
ma = (*map)[i] = malloc(page_count * sizeof(vm_page_t),
DRM_I915_GEM, M_WAITOK | M_ZERO);
- if (vm_fault_quick_hold_pages(&curproc->p_vmspace->vm_map,
- exec[i].relocs_ptr, length, VM_PROT_READ | VM_PROT_WRITE,
- ma, page_count) == -1) {
+ (*maplen)[i] = vm_fault_quick_hold_pages(
+ &curproc->p_vmspace->vm_map, exec[i].relocs_ptr, length,
+ VM_PROT_READ | VM_PROT_WRITE, ma, page_count);
+ if ((*maplen)[i] == -1) {
free(ma, DRM_I915_GEM);
(*map)[i] = NULL;
return (-EFAULT);
@@ -1123,6 +1126,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_clip_rect *cliprects = NULL;
struct intel_ring_buffer *ring;
vm_page_t **relocs_ma;
+ int *relocs_len;
u32 ctx_id = i915_execbuffer2_get_context_id(*args);
u32 exec_start, exec_len;
u32 seqno;
@@ -1137,7 +1141,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (args->batch_len == 0)
return (0);
- ret = validate_exec_list(exec, args->buffer_count, &relocs_ma);
+ ret = validate_exec_list(exec, args->buffer_count, &relocs_ma,
+ &relocs_len);
if (ret != 0)
goto pre_struct_lock_err;
@@ -1411,13 +1416,11 @@ err:
pre_struct_lock_err:
for (i = 0; i < args->buffer_count; i++) {
if (relocs_ma[i] != NULL) {
- vm_page_unhold_pages(relocs_ma[i], howmany(
- exec[i].relocation_count *
- sizeof(struct drm_i915_gem_relocation_entry),
- PAGE_SIZE));
+ vm_page_unhold_pages(relocs_ma[i], relocs_len[i]);
free(relocs_ma[i], DRM_I915_GEM);
}
}
+ free(relocs_len, DRM_I915_GEM);
free(relocs_ma, DRM_I915_GEM);
free(cliprects, DRM_I915_GEM);
return ret;