drm/i915: Use non-atomic kmap for slow copy paths
As we have no requirement to be atomic or to avoid sleeping whilst performing the slow copy for shmem-based pread and pwrite, we can use kmap() instead, simplifying the code.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Eric Anholt <eric@anholt.net>
parent 9b8c4a0b21
commit 99a03df57c
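For context, a minimal sketch (not part of the patch; the helper name and the whole-page copy are illustrative) contrasting the two mapping APIs of that era. kmap_atomic() pinned a fixed per-CPU slot (the KM_USER0/KM_USER1 arguments in the removed lines below) and disabled preemption, so nothing between map and unmap could sleep, and two simultaneous mappings needed two distinct slots; kmap() may sleep waiting for a highmem mapping but always returns a valid pointer:

    #include <linux/highmem.h>
    #include <linux/mm_types.h>
    #include <linux/string.h>

    /* Illustrative helper: copy one whole page with the non-atomic API. */
    static void example_copy_page(struct page *dst, struct page *src)
    {
            char *d = kmap(dst);    /* may sleep; never returns NULL */
            char *s = kmap(src);    /* no per-CPU slot bookkeeping */

            memcpy(d, s, PAGE_SIZE);

            /* kunmap() takes the page; kunmap_atomic() took the vaddr. */
            kunmap(src);
            kunmap(dst);
    }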
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -167,7 +167,7 @@ static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
 		obj_priv->tiling_mode != I915_TILING_NONE;
 }
 
-static inline int
+static inline void
 slow_shmem_copy(struct page *dst_page,
 		int dst_offset,
 		struct page *src_page,
@@ -176,25 +176,16 @@ slow_shmem_copy(struct page *dst_page,
 {
 	char *dst_vaddr, *src_vaddr;
 
-	dst_vaddr = kmap_atomic(dst_page, KM_USER0);
-	if (dst_vaddr == NULL)
-		return -ENOMEM;
-
-	src_vaddr = kmap_atomic(src_page, KM_USER1);
-	if (src_vaddr == NULL) {
-		kunmap_atomic(dst_vaddr, KM_USER0);
-		return -ENOMEM;
-	}
+	dst_vaddr = kmap(dst_page);
+	src_vaddr = kmap(src_page);
 
 	memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
 
-	kunmap_atomic(src_vaddr, KM_USER1);
-	kunmap_atomic(dst_vaddr, KM_USER0);
-
-	return 0;
+	kunmap(src_page);
+	kunmap(dst_page);
 }
 
-static inline int
+static inline void
 slow_shmem_bit17_copy(struct page *gpu_page,
 		      int gpu_offset,
 		      struct page *cpu_page,
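Worth noting, since it explains why the error paths above can simply disappear: kmap() cannot fail. On a kernel without highmem it amounts to page_address(); with highmem it may block until a pkmap slot becomes free, but either way it returns a valid mapping. The NULL checks and -ENOMEM returns were dead code, which is what lets both helpers change from returning int to void.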
@@ -214,15 +205,8 @@ slow_shmem_bit17_copy(struct page *gpu_page,
 				       cpu_page, cpu_offset, length);
 	}
 
-	gpu_vaddr = kmap_atomic(gpu_page, KM_USER0);
-	if (gpu_vaddr == NULL)
-		return -ENOMEM;
-
-	cpu_vaddr = kmap_atomic(cpu_page, KM_USER1);
-	if (cpu_vaddr == NULL) {
-		kunmap_atomic(gpu_vaddr, KM_USER0);
-		return -ENOMEM;
-	}
+	gpu_vaddr = kmap(gpu_page);
+	cpu_vaddr = kmap(cpu_page);
 
 	/* Copy the data, XORing A6 with A17 (1). The user already knows he's
 	 * XORing with the other bits (A9 for Y, A9 and A10 for X)
@@ -246,10 +230,8 @@ slow_shmem_bit17_copy(struct page *gpu_page,
 		length -= this_length;
 	}
 
-	kunmap_atomic(cpu_vaddr, KM_USER1);
-	kunmap_atomic(gpu_vaddr, KM_USER0);
-
-	return 0;
+	kunmap(cpu_page);
+	kunmap(gpu_page);
 }
 
 /**
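The loop body elided between the two hunks above performs the swizzled copy itself. The following is a reconstruction from the surrounding context lines, so treat it as a sketch rather than verbatim source: when bit 17 of the page's physical address is set, the hardware XORs address bit 6 into the tiled layout, so the copy advances one 64-byte cacheline at a time and flips A6 (offset ^ 64) on the GPU side:

    /* Sketch of the swizzled copy loop (reconstructed, not verbatim). */
    while (length > 0) {
            /* Stop at the next 64-byte cacheline boundary. */
            int cacheline_end = ALIGN(gpu_offset + 1, 64);
            int this_length = min(cacheline_end - gpu_offset, length);
            /* XOR A6 with A17: A17 is set for this page, so flip bit 6. */
            int swizzled_gpu_offset = gpu_offset ^ 64;

            if (is_read)
                    memcpy(cpu_vaddr + cpu_offset,
                           gpu_vaddr + swizzled_gpu_offset, this_length);
            else
                    memcpy(gpu_vaddr + swizzled_gpu_offset,
                           cpu_vaddr + cpu_offset, this_length);

            cpu_offset += this_length;
            gpu_offset += this_length;
            length -= this_length;
    }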
@@ -425,21 +407,19 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
 		page_length = PAGE_SIZE - data_page_offset;
 
 		if (do_bit17_swizzling) {
-			ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
-						    shmem_page_offset,
-						    user_pages[data_page_index],
-						    data_page_offset,
-						    page_length,
-						    1);
-		} else {
-			ret = slow_shmem_copy(user_pages[data_page_index],
-					      data_page_offset,
-					      obj_priv->pages[shmem_page_index],
-					      shmem_page_offset,
-					      page_length);
+			slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
+					      shmem_page_offset,
+					      user_pages[data_page_index],
+					      data_page_offset,
+					      page_length,
+					      1);
+		} else {
+			slow_shmem_copy(user_pages[data_page_index],
+					data_page_offset,
+					obj_priv->pages[shmem_page_index],
+					shmem_page_offset,
+					page_length);
 		}
-		if (ret)
-			goto fail_put_pages;
 
 		remain -= page_length;
 		data_ptr += page_length;
@@ -900,21 +880,19 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
 		page_length = PAGE_SIZE - data_page_offset;
 
 		if (do_bit17_swizzling) {
-			ret = slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
-						    shmem_page_offset,
-						    user_pages[data_page_index],
-						    data_page_offset,
-						    page_length,
-						    0);
-		} else {
-			ret = slow_shmem_copy(obj_priv->pages[shmem_page_index],
-					      shmem_page_offset,
-					      user_pages[data_page_index],
-					      data_page_offset,
-					      page_length);
+			slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
+					      shmem_page_offset,
+					      user_pages[data_page_index],
+					      data_page_offset,
+					      page_length,
+					      0);
+		} else {
+			slow_shmem_copy(obj_priv->pages[shmem_page_index],
+					shmem_page_offset,
+					user_pages[data_page_index],
+					data_page_offset,
+					page_length);
 		}
-		if (ret)
-			goto fail_put_pages;
 
 		remain -= page_length;
 		data_ptr += page_length;
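With both helpers returning void, each caller's per-iteration error handling evaporates: the ret = ... / if (ret) goto fail_put_pages pattern becomes a plain call. A condensed paraphrase of the resulting slow-path loop shape (variable names shortened and the page/offset arithmetic elided for illustration):

    while (remain > 0) {
            /* ... compute shmem/user page indices and offsets ... */
            if (do_bit17_swizzling)
                    slow_shmem_bit17_copy(gpu_page, gpu_off,
                                          user_page, user_off,
                                          page_length, 1 /* is_read */);
            else
                    slow_shmem_copy(user_page, user_off,
                                    gpu_page, gpu_off, page_length);

            remain -= page_length;
            data_ptr += page_length;
            offset += page_length;
    }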