Merge tag 'topic/drm-misc-2016-10-24' of git://anongit.freedesktop.org/drm-intel into drm-next

First -misc pull for 4.10:
- drm_format rework from Laurent
- reservation patches from Chris that missed 4.9.
- aspect ratio support in infoframe helpers and drm mode/edid code
  (Shashank Sharma)
- rotation rework from Ville (first parts at least)
- another attempt at the CRC debugfs interface from Tomeu
- piles and piles of misc patches all over

* tag 'topic/drm-misc-2016-10-24' of git://anongit.freedesktop.org/drm-intel: (55 commits)
  drm: Use u64 for intermediate dotclock calculations
  drm/i915: Use the per-plane rotation property
  drm/omap: Use per-plane rotation property
  drm/omap: Set rotation property initial value to BIT(DRM_ROTATE_0) insted of 0
  drm/atmel-hlcdc: Use per-plane rotation property
  drm/arm: Use per-plane rotation property
  drm: Add support for optional per-plane rotation property
  drm/atomic: Reject attempts to use multiple rotation angles at once
  drm: Add drm_rotation_90_or_270()
  dma-buf/sync_file: hold reference to fence when creating sync_file
  drm/virtio: kconfig: Fixup white space.
  drm/fence: release fence reference when canceling event
  drm/i915: Handle early failure during intel_get_load_detect_pipe
  drm/fb_cma_helper: do not free fbdev if there is none
  drm: fix sparse warnings on undeclared symbols in crc debugfs
  gpu: Remove depends on RESET_CONTROLLER when not a provider
  i915: don't call drm_atomic_state_put on invalid pointer
  drm: Don't export the drm_fb_get_bpp_depth() function
  drm/arm: mali-dp: Replace drm_fb_get_bpp_depth() with drm_format_plane_cpp()
  drm: vmwgfx: Replace drm_fb_get_bpp_depth() with drm_format_info()
  ...
Dave Airlie committed on 2016-10-25 16:35:20 +10:00
commit 61d0a04d6f
75 changed files with 1354 additions and 758 deletions


@ -63,6 +63,9 @@ Frame Buffer Functions Reference
DRM Format Handling
===================
.. kernel-doc:: include/drm/drm_fourcc.h
:internal:
.. kernel-doc:: drivers/gpu/drm/drm_fourcc.c
:export:


@ -216,3 +216,9 @@ interfaces. Especially since all hardware-acceleration interfaces to
userspace are driver specific for efficiency and other reasons these
interfaces can be rather substantial. Hence every driver has its own
chapter.
Testing and validation
======================
.. kernel-doc:: drivers/gpu/drm/drm_debugfs_crc.c
:doc: CRC ABI


@ -280,18 +280,24 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
unsigned *pshared_count,
struct fence ***pshared)
{
unsigned shared_count = 0;
unsigned retry = 1;
struct fence **shared = NULL, *fence_excl = NULL;
int ret = 0;
struct fence **shared = NULL;
struct fence *fence_excl;
unsigned int shared_count;
int ret = 1;
while (retry) {
do {
struct reservation_object_list *fobj;
unsigned seq;
unsigned int i;
seq = read_seqcount_begin(&obj->seq);
shared_count = i = 0;
rcu_read_lock();
seq = read_seqcount_begin(&obj->seq);
fence_excl = rcu_dereference(obj->fence_excl);
if (fence_excl && !fence_get_rcu(fence_excl))
goto unlock;
fobj = rcu_dereference(obj->fence);
if (fobj) {
@ -309,52 +315,37 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
}
ret = -ENOMEM;
shared_count = 0;
break;
}
shared = nshared;
memcpy(shared, fobj->shared, sz);
shared_count = fobj->shared_count;
} else
shared_count = 0;
fence_excl = rcu_dereference(obj->fence_excl);
retry = read_seqcount_retry(&obj->seq, seq);
if (retry)
goto unlock;
if (!fence_excl || fence_get_rcu(fence_excl)) {
unsigned i;
for (i = 0; i < shared_count; ++i) {
if (fence_get_rcu(shared[i]))
continue;
/* uh oh, refcount failed, abort and retry */
while (i--)
fence_put(shared[i]);
if (fence_excl) {
fence_put(fence_excl);
fence_excl = NULL;
}
retry = 1;
break;
shared[i] = rcu_dereference(fobj->shared[i]);
if (!fence_get_rcu(shared[i]))
break;
}
} else
retry = 1;
}
if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
while (i--)
fence_put(shared[i]);
fence_put(fence_excl);
goto unlock;
}
ret = 0;
unlock:
rcu_read_unlock();
}
*pshared_count = shared_count;
if (shared_count)
*pshared = shared;
else {
*pshared = NULL;
} while (ret);
if (!shared_count) {
kfree(shared);
shared = NULL;
}
*pshared_count = shared_count;
*pshared = shared;
*pfence_excl = fence_excl;
return ret;
@ -397,9 +388,6 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
if (fobj)
shared_count = fobj->shared_count;
if (read_seqcount_retry(&obj->seq, seq))
goto unlock_retry;
for (i = 0; i < shared_count; ++i) {
struct fence *lfence = rcu_dereference(fobj->shared[i]);
@ -422,9 +410,6 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
if (!shared_count) {
struct fence *fence_excl = rcu_dereference(obj->fence_excl);
if (read_seqcount_retry(&obj->seq, seq))
goto unlock_retry;
if (fence_excl &&
!test_bit(FENCE_FLAG_SIGNALED_BIT, &fence_excl->flags)) {
if (!fence_get_rcu(fence_excl))
@ -439,6 +424,11 @@ long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
rcu_read_unlock();
if (fence) {
if (read_seqcount_retry(&obj->seq, seq)) {
fence_put(fence);
goto retry;
}
ret = fence_wait_timeout(fence, intr, ret);
fence_put(fence);
if (ret > 0 && wait_all && (i + 1 < shared_count))
@ -484,12 +474,13 @@ bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
bool test_all)
{
unsigned seq, shared_count;
int ret = true;
int ret;
rcu_read_lock();
retry:
ret = true;
shared_count = 0;
seq = read_seqcount_begin(&obj->seq);
rcu_read_lock();
if (test_all) {
unsigned i;
@ -500,46 +491,35 @@ bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
if (fobj)
shared_count = fobj->shared_count;
if (read_seqcount_retry(&obj->seq, seq))
goto unlock_retry;
for (i = 0; i < shared_count; ++i) {
struct fence *fence = rcu_dereference(fobj->shared[i]);
ret = reservation_object_test_signaled_single(fence);
if (ret < 0)
goto unlock_retry;
goto retry;
else if (!ret)
break;
}
/*
* There could be a read_seqcount_retry here, but nothing cares
* about whether it's the old or newer fence pointers that are
* signaled. That race could still have happened after checking
* read_seqcount_retry. If you care, use ww_mutex_lock.
*/
if (read_seqcount_retry(&obj->seq, seq))
goto retry;
}
if (!shared_count) {
struct fence *fence_excl = rcu_dereference(obj->fence_excl);
if (read_seqcount_retry(&obj->seq, seq))
goto unlock_retry;
if (fence_excl) {
ret = reservation_object_test_signaled_single(
fence_excl);
if (ret < 0)
goto unlock_retry;
goto retry;
if (read_seqcount_retry(&obj->seq, seq))
goto retry;
}
}
rcu_read_unlock();
return ret;
unlock_retry:
rcu_read_unlock();
goto retry;
}
EXPORT_SYMBOL_GPL(reservation_object_test_signaled_rcu);
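As a caller-side note: the amdgpu hunk later in this pull drops its own timeout == 0 special case and simply calls the lockless wait. A minimal sketch of that pattern (the wrapper name is hypothetical):

static long example_wait_idle(struct reservation_object *resv,
			      unsigned long timeout)
{
	/* wait for all fences (wait_all = true), interruptibly (intr = true) */
	long ret = reservation_object_wait_timeout_rcu(resv, true, true, timeout);

	/*
	 * ret == 0: not signaled within the timeout
	 * ret  > 0: everything signaled, remaining jiffies
	 * ret  < 0: error, e.g. -ERESTARTSYS
	 */
	return ret;
}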


@ -79,7 +79,7 @@ struct sync_file *sync_file_create(struct fence *fence)
if (!sync_file)
return NULL;
sync_file->fence = fence;
sync_file->fence = fence_get(fence);
snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%llu-%d",
fence->ops->get_driver_name(fence),
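The one-line change above makes sync_file_create() take its own reference on the fence, so the reference the caller passes in remains the caller's to manage. A hedged sketch of the caller side (hypothetical wrapper, error handling condensed):

static int example_export_fence(struct fence *fence)
{
	struct sync_file *sync_file;

	sync_file = sync_file_create(fence);	/* takes its own reference now */
	if (!sync_file) {
		fence_put(fence);		/* caller still owns its reference */
		return -ENOMEM;
	}

	/* ... install sync_file into an fd ... */

	fence_put(fence);	/* done with the caller's reference */
	return 0;
}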


@ -9,7 +9,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
drm_scatter.o drm_pci.o \
drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
drm_crtc.o drm_fourcc.o drm_modes.o drm_edid.o \
drm_info.o drm_debugfs.o drm_encoder_slave.o \
drm_info.o drm_encoder_slave.o \
drm_trace_points.o drm_global.o drm_prime.o \
drm_rect.o drm_vma_manager.o drm_flip_work.o \
drm_modeset_lock.o drm_atomic.o drm_bridge.o \
@ -23,6 +23,7 @@ drm-$(CONFIG_PCI) += ati_pcigart.o
drm-$(CONFIG_DRM_PANEL) += drm_panel.o
drm-$(CONFIG_OF) += drm_of.o
drm-$(CONFIG_AGP) += drm_agpsupport.o
drm-$(CONFIG_DEBUG_FS) += drm_debugfs.o drm_debugfs_crc.o
drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \


@ -90,12 +90,12 @@ static struct fb_ops amdgpufb_ops = {
};
int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tiled)
int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int cpp, bool tiled)
{
int aligned = width;
int pitch_mask = 0;
switch (bpp / 8) {
switch (cpp) {
case 1:
pitch_mask = 255;
break;
@ -110,7 +110,7 @@ int amdgpu_align_pitch(struct amdgpu_device *adev, int width, int bpp, bool tile
aligned += pitch_mask;
aligned &= ~pitch_mask;
return aligned;
return aligned * cpp;
}
static void amdgpufb_destroy_pinned_object(struct drm_gem_object *gobj)
@ -139,13 +139,13 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
int ret;
int aligned_size, size;
int height = mode_cmd->height;
u32 bpp, depth;
u32 cpp;
drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
cpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0);
/* need to align pitch with crtc limits */
mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, bpp,
fb_tiled) * ((bpp + 1) / 8);
mode_cmd->pitches[0] = amdgpu_align_pitch(adev, mode_cmd->width, cpp,
fb_tiled);
height = ALIGN(mode_cmd->height, 8);
size = mode_cmd->pitches[0] * height;


@ -407,10 +407,8 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
}
robj = gem_to_amdgpu_bo(gobj);
if (timeout == 0)
ret = reservation_object_test_signaled_rcu(robj->tbo.resv, true);
else
ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true, timeout);
ret = reservation_object_wait_timeout_rcu(robj->tbo.resv, true, true,
timeout);
/* ret == 0 means not signaled,
* ret > 0 means signaled
@ -704,7 +702,8 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
uint32_t handle;
int r;
args->pitch = amdgpu_align_pitch(adev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
args->pitch = amdgpu_align_pitch(adev, args->width,
DIV_ROUND_UP(args->bpp, 8), 0);
args->size = (u64)args->pitch * args->height;
args->size = ALIGN(args->size, PAGE_SIZE);


@ -223,14 +223,12 @@ static void hdlcd_plane_atomic_update(struct drm_plane *plane,
{
struct hdlcd_drm_private *hdlcd;
struct drm_gem_cma_object *gem;
unsigned int depth, bpp;
u32 src_w, src_h, dest_w, dest_h;
dma_addr_t scanout_start;
if (!plane->state->fb)
return;
drm_fb_get_bpp_depth(plane->state->fb->pixel_format, &depth, &bpp);
src_w = plane->state->src_w >> 16;
src_h = plane->state->src_h >> 16;
dest_w = plane->state->crtc_w;
@ -238,7 +236,8 @@ static void hdlcd_plane_atomic_update(struct drm_plane *plane,
gem = drm_fb_cma_get_gem_obj(plane->state->fb, 0);
scanout_start = gem->paddr + plane->state->fb->offsets[0] +
plane->state->crtc_y * plane->state->fb->pitches[0] +
plane->state->crtc_x * bpp / 8;
plane->state->crtc_x *
drm_format_plane_cpp(plane->state->fb->pixel_format, 0);
hdlcd = plane->dev->dev_private;
hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, plane->state->fb->pitches[0]);


@ -198,9 +198,6 @@ static void malidp500_modeset(struct malidp_hw_device *hwdev, struct videomode *
static int malidp500_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16 h, u32 fmt)
{
unsigned int depth;
int bpp;
/* RGB888 or BGR888 can't be rotated */
if ((fmt == DRM_FORMAT_RGB888) || (fmt == DRM_FORMAT_BGR888))
return -EINVAL;
@ -210,9 +207,7 @@ static int malidp500_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16
* worth of pixel data. Required size is then:
* size = rotated_width * (bpp / 8) * 8;
*/
drm_fb_get_bpp_depth(fmt, &depth, &bpp);
return w * bpp;
return w * drm_format_plane_cpp(fmt, 0) * 8;
}
static int malidp550_query_hw(struct malidp_hw_device *hwdev)


@ -254,21 +254,18 @@ int malidp_de_planes_init(struct drm_device *drm)
if (ret < 0)
goto cleanup;
if (!drm->mode_config.rotation_property) {
/* SMART layer can't be rotated */
if (id != DE_SMART) {
unsigned long flags = DRM_ROTATE_0 |
DRM_ROTATE_90 |
DRM_ROTATE_180 |
DRM_ROTATE_270 |
DRM_REFLECT_X |
DRM_REFLECT_Y;
drm->mode_config.rotation_property =
drm_mode_create_rotation_property(drm, flags);
drm_plane_create_rotation_property(&plane->base,
DRM_ROTATE_0,
flags);
}
/* SMART layer can't be rotated */
if (drm->mode_config.rotation_property && (id != DE_SMART))
drm_object_attach_property(&plane->base.base,
drm->mode_config.rotation_property,
DRM_ROTATE_0);
drm_plane_helper_add(&plane->base,
&malidp_de_plane_helper_funcs);


@ -464,7 +464,7 @@ atmel_hlcdc_dc_atomic_complete(struct atmel_hlcdc_dc_commit *commit)
drm_atomic_helper_cleanup_planes(dev, old_state);
drm_atomic_state_free(old_state);
drm_atomic_state_put(old_state);
/* Complete the commit, wake up any waiter. */
spin_lock(&dc->commit.wait.lock);
@ -521,6 +521,7 @@ static int atmel_hlcdc_dc_atomic_commit(struct drm_device *dev,
/* Swap the state, this is the point of no return. */
drm_atomic_helper_swap_state(state, true);
drm_atomic_state_get(state);
if (async)
queue_work(dc->wq, &commit->work);
else


@ -393,7 +393,7 @@ static void atmel_hlcdc_plane_update_format(struct atmel_hlcdc_plane *plane,
if ((state->base.fb->pixel_format == DRM_FORMAT_YUV422 ||
state->base.fb->pixel_format == DRM_FORMAT_NV61) &&
(state->base.rotation & (DRM_ROTATE_90 | DRM_ROTATE_270)))
drm_rotation_90_or_270(state->base.rotation))
cfg |= ATMEL_HLCDC_YUV422ROT;
atmel_hlcdc_layer_update_cfg(&plane->layer,
@ -628,7 +628,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
/*
* Swap width and size in case of 90 or 270 degrees rotation
*/
if (state->base.rotation & (DRM_ROTATE_90 | DRM_ROTATE_270)) {
if (drm_rotation_90_or_270(state->base.rotation)) {
tmp = state->crtc_w;
state->crtc_w = state->crtc_h;
state->crtc_h = tmp;
@ -883,9 +883,9 @@ static int atmel_hlcdc_plane_atomic_get_property(struct drm_plane *p,
return 0;
}
static void atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane,
const struct atmel_hlcdc_layer_desc *desc,
struct atmel_hlcdc_plane_properties *props)
static int atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane,
const struct atmel_hlcdc_layer_desc *desc,
struct atmel_hlcdc_plane_properties *props)
{
struct regmap *regmap = plane->layer.hlcdc->regmap;
@ -902,10 +902,18 @@ static void atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane,
ATMEL_HLCDC_LAYER_GA_MASK);
}
if (desc->layout.xstride && desc->layout.pstride)
drm_object_attach_property(&plane->base.base,
plane->base.dev->mode_config.rotation_property,
DRM_ROTATE_0);
if (desc->layout.xstride && desc->layout.pstride) {
int ret;
ret = drm_plane_create_rotation_property(&plane->base,
DRM_ROTATE_0,
DRM_ROTATE_0 |
DRM_ROTATE_90 |
DRM_ROTATE_180 |
DRM_ROTATE_270);
if (ret)
return ret;
}
if (desc->layout.csc) {
/*
@ -925,6 +933,8 @@ static void atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane,
ATMEL_HLCDC_LAYER_CSC_CFG(&plane->layer, 2),
0x40040890);
}
return 0;
}
static struct drm_plane_helper_funcs atmel_hlcdc_layer_plane_helper_funcs = {
@ -1036,7 +1046,9 @@ atmel_hlcdc_plane_create(struct drm_device *dev,
&atmel_hlcdc_layer_plane_helper_funcs);
/* Set default property values*/
atmel_hlcdc_plane_init_properties(plane, desc, props);
ret = atmel_hlcdc_plane_init_properties(plane, desc, props);
if (ret)
return ERR_PTR(ret);
return plane;
}
@ -1054,15 +1066,6 @@ atmel_hlcdc_plane_create_properties(struct drm_device *dev)
if (!props->alpha)
return ERR_PTR(-ENOMEM);
dev->mode_config.rotation_property =
drm_mode_create_rotation_property(dev,
DRM_ROTATE_0 |
DRM_ROTATE_90 |
DRM_ROTATE_180 |
DRM_ROTATE_270);
if (!dev->mode_config.rotation_property)
return ERR_PTR(-ENOMEM);
return props;
}


@ -138,12 +138,12 @@ static int cirrusfb_create_object(struct cirrus_fbdev *afbdev,
{
struct drm_device *dev = afbdev->helper.dev;
struct cirrus_device *cdev = dev->dev_private;
u32 bpp, depth;
u32 bpp;
u32 size;
struct drm_gem_object *gobj;
int ret = 0;
drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
bpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0) * 8;
if (!cirrus_check_framebuffer(cdev, mode_cmd->width, mode_cmd->height,
bpp, mode_cmd->pitches[0]))


@ -52,10 +52,10 @@ cirrus_user_framebuffer_create(struct drm_device *dev,
struct cirrus_device *cdev = dev->dev_private;
struct drm_gem_object *obj;
struct cirrus_framebuffer *cirrus_fb;
u32 bpp;
int ret;
u32 bpp, depth;
drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
bpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0) * 8;
if (!cirrus_check_framebuffer(cdev, mode_cmd->width, mode_cmd->height,
bpp, mode_cmd->pitches[0]))


@ -74,6 +74,8 @@ EXPORT_SYMBOL(drm_atomic_state_default_release);
int
drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
{
kref_init(&state->ref);
/* TODO legacy paths should maybe do a better job about
* setting this appropriately?
*/
@ -215,22 +217,16 @@ void drm_atomic_state_clear(struct drm_atomic_state *state)
EXPORT_SYMBOL(drm_atomic_state_clear);
/**
* drm_atomic_state_free - free all memory for an atomic state
* @state: atomic state to deallocate
* __drm_atomic_state_free - free all memory for an atomic state
* @ref: This atomic state to deallocate
*
* This frees all memory associated with an atomic state, including all the
* per-object state for planes, crtcs and connectors.
*/
void drm_atomic_state_free(struct drm_atomic_state *state)
void __drm_atomic_state_free(struct kref *ref)
{
struct drm_device *dev;
struct drm_mode_config *config;
if (!state)
return;
dev = state->dev;
config = &dev->mode_config;
struct drm_atomic_state *state = container_of(ref, typeof(*state), ref);
struct drm_mode_config *config = &state->dev->mode_config;
drm_atomic_state_clear(state);
@ -243,7 +239,7 @@ void drm_atomic_state_free(struct drm_atomic_state *state)
kfree(state);
}
}
EXPORT_SYMBOL(drm_atomic_state_free);
EXPORT_SYMBOL(__drm_atomic_state_free);
/**
* drm_atomic_get_crtc_state - get crtc state
@ -709,7 +705,10 @@ int drm_atomic_plane_set_property(struct drm_plane *plane,
state->src_w = val;
} else if (property == config->prop_src_h) {
state->src_h = val;
} else if (property == config->rotation_property) {
} else if (property == config->rotation_property ||
property == plane->rotation_property) {
if (!is_power_of_2(val & DRM_ROTATE_MASK))
return -EINVAL;
state->rotation = val;
} else if (property == plane->zpos_property) {
state->zpos = val;
@ -767,7 +766,8 @@ drm_atomic_plane_get_property(struct drm_plane *plane,
*val = state->src_w;
} else if (property == config->prop_src_h) {
*val = state->src_h;
} else if (property == config->rotation_property) {
} else if (property == config->rotation_property ||
property == plane->rotation_property) {
*val = state->rotation;
} else if (property == plane->zpos_property) {
*val = state->zpos;
@ -1742,7 +1742,7 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) {
/*
* Unlike commit, check_only does not clean up state.
* Below we call drm_atomic_state_free for it.
* Below we call drm_atomic_state_put for it.
*/
ret = drm_atomic_check_only(state);
} else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) {
@ -1775,8 +1775,7 @@ int drm_mode_atomic_ioctl(struct drm_device *dev,
goto retry;
}
if (ret || arg->flags & DRM_MODE_ATOMIC_TEST_ONLY)
drm_atomic_state_free(state);
drm_atomic_state_put(state);
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
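The hunks above and in drm_atomic_helper.c below convert struct drm_atomic_state to reference counting: kref_init() in drm_atomic_state_init(), __drm_atomic_state_free() behind the kref, and an unconditional drm_atomic_state_put() in callers instead of the old "driver takes ownership on success" dance. A rough sketch of the resulting nonblocking-commit pattern, condensed from drm_atomic_helper_commit() (the wrapper name is hypothetical):

static void example_queue_commit(struct drm_atomic_state *state, bool nonblock)
{
	/* extra reference owned by the commit worker */
	drm_atomic_state_get(state);

	if (nonblock)
		queue_work(system_unbound_wq, &state->commit_work);
	else
		commit_tail(state);	/* ends with drm_atomic_state_put(state) */
}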


@ -458,10 +458,11 @@ mode_fixup(struct drm_atomic_state *state)
* removed from the crtc.
* crtc_state->active_changed is set when crtc_state->active changes,
* which is used for dpms.
* See also: drm_atomic_crtc_needs_modeset()
*
* IMPORTANT:
*
* Drivers which update ->mode_changed (e.g. in their ->atomic_check hooks if a
* Drivers which set ->mode_changed (e.g. in their ->atomic_check hooks if a
* plane update can't be done without a full modeset) _must_ call this function
* afterwards after that change. It is permitted to call this function multiple
* times for the same update, e.g. when the ->atomic_check functions depend upon
@ -510,9 +511,9 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
for_each_connector_in_state(state, connector, connector_state, i) {
/*
* This only sets crtc->mode_changed for routing changes,
* drivers must set crtc->mode_changed themselves when connector
* properties need to be updated.
* This only sets crtc->connectors_changed for routing changes,
* drivers must set crtc->connectors_changed themselves when
* connector properties need to be updated.
*/
ret = update_connector_routing(state, connector,
connector_state);
@ -1207,7 +1208,7 @@ static void commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_commit_cleanup_done(state);
drm_atomic_state_free(state);
drm_atomic_state_put(state);
}
static void commit_work(struct work_struct *work)
@ -1289,6 +1290,7 @@ int drm_atomic_helper_commit(struct drm_device *dev,
* make sure work items don't artifically stall on each another.
*/
drm_atomic_state_get(state);
if (nonblock)
queue_work(system_unbound_wq, &state->commit_work);
else
@ -1590,7 +1592,7 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_hw_done);
*
* This signals completion of the atomic update @state, including any cleanup
* work. If used, it must be called right before calling
* drm_atomic_state_free().
* drm_atomic_state_put().
*
* This is part of the atomic helper support for nonblocking commits, see
* drm_atomic_helper_setup_commit() for an overview.
@ -2113,18 +2115,13 @@ int drm_atomic_helper_update_plane(struct drm_plane *plane,
state->legacy_cursor_update = true;
ret = drm_atomic_commit(state);
if (ret != 0)
goto fail;
/* Driver takes ownership of state on successful commit. */
return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
drm_atomic_state_free(state);
drm_atomic_state_put(state);
return ret;
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
@ -2186,18 +2183,13 @@ int drm_atomic_helper_disable_plane(struct drm_plane *plane)
goto fail;
ret = drm_atomic_commit(state);
if (ret != 0)
goto fail;
/* Driver takes ownership of state on successful commit. */
return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
drm_atomic_state_free(state);
drm_atomic_state_put(state);
return ret;
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
@ -2326,18 +2318,13 @@ int drm_atomic_helper_set_config(struct drm_mode_set *set)
goto fail;
ret = drm_atomic_commit(state);
if (ret != 0)
goto fail;
/* Driver takes ownership of state on successful commit. */
return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
drm_atomic_state_free(state);
drm_atomic_state_put(state);
return ret;
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
@ -2412,7 +2399,7 @@ int __drm_atomic_helper_set_config(struct drm_mode_set *set,
primary_state->crtc_h = vdisplay;
primary_state->src_x = set->x << 16;
primary_state->src_y = set->y << 16;
if (primary_state->rotation & (DRM_ROTATE_90 | DRM_ROTATE_270)) {
if (drm_rotation_90_or_270(primary_state->rotation)) {
primary_state->src_w = vdisplay << 16;
primary_state->src_h = hdisplay << 16;
} else {
@ -2479,11 +2466,8 @@ int drm_atomic_helper_disable_all(struct drm_device *dev,
}
err = drm_atomic_commit(state);
free:
if (err < 0)
drm_atomic_state_free(state);
drm_atomic_state_put(state);
return err;
}
EXPORT_SYMBOL(drm_atomic_helper_disable_all);
@ -2534,7 +2518,7 @@ struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev)
err = drm_atomic_helper_disable_all(dev, &ctx);
if (err < 0) {
drm_atomic_state_free(state);
drm_atomic_state_put(state);
state = ERR_PTR(err);
goto unlock;
}
@ -2623,18 +2607,13 @@ drm_atomic_helper_crtc_set_property(struct drm_crtc *crtc,
goto fail;
ret = drm_atomic_commit(state);
if (ret != 0)
goto fail;
/* Driver takes ownership of state on successful commit. */
return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
drm_atomic_state_free(state);
drm_atomic_state_put(state);
return ret;
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
@ -2683,18 +2662,13 @@ drm_atomic_helper_plane_set_property(struct drm_plane *plane,
goto fail;
ret = drm_atomic_commit(state);
if (ret != 0)
goto fail;
/* Driver takes ownership of state on successful commit. */
return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
drm_atomic_state_free(state);
drm_atomic_state_put(state);
return ret;
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
@ -2743,18 +2717,13 @@ drm_atomic_helper_connector_set_property(struct drm_connector *connector,
goto fail;
ret = drm_atomic_commit(state);
if (ret != 0)
goto fail;
/* Driver takes ownership of state on successful commit. */
return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
drm_atomic_state_free(state);
drm_atomic_state_put(state);
return ret;
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
@ -2827,18 +2796,13 @@ int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
}
ret = drm_atomic_nonblocking_commit(state);
if (ret != 0)
goto fail;
/* Driver takes ownership of state on successful commit. */
return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
drm_atomic_state_free(state);
drm_atomic_state_put(state);
return ret;
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
@ -2914,19 +2878,14 @@ int drm_atomic_helper_connector_dpms(struct drm_connector *connector,
crtc_state->active = active;
ret = drm_atomic_commit(state);
if (ret != 0)
goto fail;
/* Driver takes ownership of state on successful commit. */
return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
connector->dpms = old_mode;
drm_atomic_state_free(state);
drm_atomic_state_put(state);
return ret;
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);
@ -3333,7 +3292,7 @@ drm_atomic_helper_duplicate_state(struct drm_device *dev,
free:
if (err < 0) {
drm_atomic_state_free(state);
drm_atomic_state_put(state);
state = ERR_PTR(err);
}
@ -3448,22 +3407,14 @@ int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
goto fail;
ret = drm_atomic_commit(state);
if (ret)
goto fail;
/* Driver takes ownership of state on successful commit. */
drm_property_unreference_blob(blob);
return 0;
fail:
if (ret == -EDEADLK)
goto backoff;
drm_atomic_state_free(state);
drm_atomic_state_put(state);
drm_property_unreference_blob(blob);
return ret;
backoff:
drm_atomic_state_clear(state);
drm_atomic_legacy_backoff(state);


@ -162,6 +162,41 @@ struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_mode_create_rotation_property);
int drm_plane_create_rotation_property(struct drm_plane *plane,
unsigned int rotation,
unsigned int supported_rotations)
{
static const struct drm_prop_enum_list props[] = {
{ __builtin_ffs(DRM_ROTATE_0) - 1, "rotate-0" },
{ __builtin_ffs(DRM_ROTATE_90) - 1, "rotate-90" },
{ __builtin_ffs(DRM_ROTATE_180) - 1, "rotate-180" },
{ __builtin_ffs(DRM_ROTATE_270) - 1, "rotate-270" },
{ __builtin_ffs(DRM_REFLECT_X) - 1, "reflect-x" },
{ __builtin_ffs(DRM_REFLECT_Y) - 1, "reflect-y" },
};
struct drm_property *prop;
WARN_ON((supported_rotations & DRM_ROTATE_MASK) == 0);
WARN_ON(!is_power_of_2(rotation & DRM_ROTATE_MASK));
WARN_ON(rotation & ~supported_rotations);
prop = drm_property_create_bitmask(plane->dev, 0, "rotation",
props, ARRAY_SIZE(props),
supported_rotations);
if (!prop)
return -ENOMEM;
drm_object_attach_property(&plane->base, prop, rotation);
if (plane->state)
plane->state->rotation = rotation;
plane->rotation_property = prop;
return 0;
}
EXPORT_SYMBOL(drm_plane_create_rotation_property);
/**
* drm_rotation_simplify() - Try to simplify the rotation
* @rotation: Rotation to be simplified
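For illustration, a hypothetical driver-side sketch of the new drm_plane_create_rotation_property() helper, following the mali-dp and atmel-hlcdc conversions elsewhere in this pull: the rotation property is now created per plane at init time rather than attached from the device-global mode_config.rotation_property.

static int example_plane_init_rotation(struct drm_plane *plane)
{
	/* initial value DRM_ROTATE_0, plus whatever rotations the hardware supports */
	return drm_plane_create_rotation_property(plane,
						  DRM_ROTATE_0,
						  DRM_ROTATE_0 |
						  DRM_ROTATE_90 |
						  DRM_ROTATE_180 |
						  DRM_ROTATE_270);
}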


@ -40,7 +40,7 @@
#include <drm/drm_modeset_lock.h>
#include <drm/drm_atomic.h>
#include <drm/drm_auth.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_debugfs_crc.h>
#include "drm_crtc_internal.h"
#include "drm_internal.h"
@ -122,6 +122,10 @@ static int drm_crtc_register_all(struct drm_device *dev)
int ret = 0;
drm_for_each_crtc(crtc, dev) {
if (drm_debugfs_crtc_add(crtc))
DRM_ERROR("Failed to initialize debugfs entry for CRTC '%s'.\n",
crtc->name);
if (crtc->funcs->late_register)
ret = crtc->funcs->late_register(crtc);
if (ret)
@ -138,9 +142,29 @@ static void drm_crtc_unregister_all(struct drm_device *dev)
drm_for_each_crtc(crtc, dev) {
if (crtc->funcs->early_unregister)
crtc->funcs->early_unregister(crtc);
drm_debugfs_crtc_remove(crtc);
}
}
static int drm_crtc_crc_init(struct drm_crtc *crtc)
{
#ifdef CONFIG_DEBUG_FS
spin_lock_init(&crtc->crc.lock);
init_waitqueue_head(&crtc->crc.wq);
crtc->crc.source = kstrdup("auto", GFP_KERNEL);
if (!crtc->crc.source)
return -ENOMEM;
#endif
return 0;
}
static void drm_crtc_crc_fini(struct drm_crtc *crtc)
{
#ifdef CONFIG_DEBUG_FS
kfree(crtc->crc.source);
#endif
}
/**
* drm_crtc_init_with_planes - Initialise a new CRTC object with
* specified primary and cursor planes.
@ -210,6 +234,12 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
if (cursor)
cursor->possible_crtcs = 1 << drm_crtc_index(crtc);
ret = drm_crtc_crc_init(crtc);
if (ret) {
drm_mode_object_unregister(dev, &crtc->base);
return ret;
}
if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
drm_object_attach_property(&crtc->base, config->prop_active, 0);
drm_object_attach_property(&crtc->base, config->prop_mode_id, 0);
@ -236,6 +266,8 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
* the indices on the drm_crtc after us in the crtc_list.
*/
drm_crtc_crc_fini(crtc);
kfree(crtc->gamma_store);
crtc->gamma_store = NULL;
@ -695,8 +727,7 @@ int drm_crtc_check_viewport(const struct drm_crtc *crtc,
drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
if (crtc->state &&
crtc->primary->state->rotation & (DRM_ROTATE_90 |
DRM_ROTATE_270))
drm_rotation_90_or_270(crtc->primary->state->rotation))
swap(hdisplay, vdisplay);
return drm_framebuffer_check_src_coords(x << 16, y << 16,


@ -415,5 +415,37 @@ void drm_debugfs_connector_remove(struct drm_connector *connector)
connector->debugfs_entry = NULL;
}
#endif /* CONFIG_DEBUG_FS */
int drm_debugfs_crtc_add(struct drm_crtc *crtc)
{
struct drm_minor *minor = crtc->dev->primary;
struct dentry *root;
char *name;
name = kasprintf(GFP_KERNEL, "crtc-%d", crtc->index);
if (!name)
return -ENOMEM;
root = debugfs_create_dir(name, minor->debugfs_root);
kfree(name);
if (!root)
return -ENOMEM;
crtc->debugfs_entry = root;
if (drm_debugfs_crtc_crc_add(crtc))
goto error;
return 0;
error:
drm_debugfs_crtc_remove(crtc);
return -ENOMEM;
}
void drm_debugfs_crtc_remove(struct drm_crtc *crtc)
{
debugfs_remove_recursive(crtc->debugfs_entry);
crtc->debugfs_entry = NULL;
}
#endif /* CONFIG_DEBUG_FS */


@ -0,0 +1,352 @@
/*
* Copyright © 2008 Intel Corporation
* Copyright © 2016 Collabora Ltd
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Based on code from the i915 driver.
* Original author: Damien Lespiau <damien.lespiau@intel.com>
*
*/
#include <linux/circ_buf.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <drm/drmP.h>
#include "drm_internal.h"
/**
* DOC: CRC ABI
*
* DRM device drivers can provide to userspace CRC information of each frame as
* it reached a given hardware component (a "source").
*
* Userspace can control generation of CRCs in a given CRTC by writing to the
* file dri/0/crtc-N/crc/control in debugfs, with N being the index of the CRTC.
* Accepted values are source names (which are driver-specific) and the "auto"
* keyword, which will let the driver select a default source of frame CRCs
* for this CRTC.
*
* Once frame CRC generation is enabled, userspace can capture them by reading
* the dri/0/crtc-N/crc/data file. Each line in that file contains the frame
* number in the first field and then a number of unsigned integer fields
* containing the CRC data. Fields are separated by a single space and the number
* of CRC fields is source-specific.
*
* Note that though in some cases the CRC is computed in a specified way and on
* the frame contents as supplied by userspace (eDP 1.3), in general the CRC
* computation is performed in an unspecified way and on frame contents that have
* been already processed in also an unspecified way and thus userspace cannot
* rely on being able to generate matching CRC values for the frame contents that
* it submits. In this general case, the maximum userspace can do is to compare
* the reported CRCs of frames that should have the same contents.
*/
static int crc_control_show(struct seq_file *m, void *data)
{
struct drm_crtc *crtc = m->private;
seq_printf(m, "%s\n", crtc->crc.source);
return 0;
}
static int crc_control_open(struct inode *inode, struct file *file)
{
struct drm_crtc *crtc = inode->i_private;
return single_open(file, crc_control_show, crtc);
}
static ssize_t crc_control_write(struct file *file, const char __user *ubuf,
size_t len, loff_t *offp)
{
struct seq_file *m = file->private_data;
struct drm_crtc *crtc = m->private;
struct drm_crtc_crc *crc = &crtc->crc;
char *source;
if (len == 0)
return 0;
if (len > PAGE_SIZE - 1) {
DRM_DEBUG_KMS("Expected < %lu bytes into crtc crc control\n",
PAGE_SIZE);
return -E2BIG;
}
source = memdup_user_nul(ubuf, len);
if (IS_ERR(source))
return PTR_ERR(source);
if (source[len] == '\n')
source[len] = '\0';
spin_lock_irq(&crc->lock);
if (crc->opened) {
spin_unlock_irq(&crc->lock);
kfree(source);
return -EBUSY;
}
kfree(crc->source);
crc->source = source;
spin_unlock_irq(&crc->lock);
*offp += len;
return len;
}
static const struct file_operations drm_crtc_crc_control_fops = {
.owner = THIS_MODULE,
.open = crc_control_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
.write = crc_control_write
};
static int crtc_crc_open(struct inode *inode, struct file *filep)
{
struct drm_crtc *crtc = inode->i_private;
struct drm_crtc_crc *crc = &crtc->crc;
struct drm_crtc_crc_entry *entries = NULL;
size_t values_cnt;
int ret;
if (crc->opened)
return -EBUSY;
ret = crtc->funcs->set_crc_source(crtc, crc->source, &values_cnt);
if (ret)
return ret;
if (WARN_ON(values_cnt > DRM_MAX_CRC_NR)) {
ret = -EINVAL;
goto err_disable;
}
if (WARN_ON(values_cnt == 0)) {
ret = -EINVAL;
goto err_disable;
}
entries = kcalloc(DRM_CRC_ENTRIES_NR, sizeof(*entries), GFP_KERNEL);
if (!entries) {
ret = -ENOMEM;
goto err_disable;
}
spin_lock_irq(&crc->lock);
crc->entries = entries;
crc->values_cnt = values_cnt;
crc->opened = true;
spin_unlock_irq(&crc->lock);
return 0;
err_disable:
crtc->funcs->set_crc_source(crtc, NULL, &values_cnt);
return ret;
}
static int crtc_crc_release(struct inode *inode, struct file *filep)
{
struct drm_crtc *crtc = filep->f_inode->i_private;
struct drm_crtc_crc *crc = &crtc->crc;
size_t values_cnt;
spin_lock_irq(&crc->lock);
kfree(crc->entries);
crc->entries = NULL;
crc->head = 0;
crc->tail = 0;
crc->values_cnt = 0;
crc->opened = false;
spin_unlock_irq(&crc->lock);
crtc->funcs->set_crc_source(crtc, NULL, &values_cnt);
return 0;
}
static int crtc_crc_data_count(struct drm_crtc_crc *crc)
{
assert_spin_locked(&crc->lock);
return CIRC_CNT(crc->head, crc->tail, DRM_CRC_ENTRIES_NR);
}
/*
* 1 frame field of 10 chars plus a number of CRC fields of 10 chars each, space
* separated, with a newline at the end and null-terminated.
*/
#define LINE_LEN(values_cnt) (10 + 11 * values_cnt + 1 + 1)
#define MAX_LINE_LEN (LINE_LEN(DRM_MAX_CRC_NR))
static ssize_t crtc_crc_read(struct file *filep, char __user *user_buf,
size_t count, loff_t *pos)
{
struct drm_crtc *crtc = filep->f_inode->i_private;
struct drm_crtc_crc *crc = &crtc->crc;
struct drm_crtc_crc_entry *entry;
char buf[MAX_LINE_LEN];
int ret, i;
spin_lock_irq(&crc->lock);
if (!crc->source) {
spin_unlock_irq(&crc->lock);
return 0;
}
/* Nothing to read? */
while (crtc_crc_data_count(crc) == 0) {
if (filep->f_flags & O_NONBLOCK) {
spin_unlock_irq(&crc->lock);
return -EAGAIN;
}
ret = wait_event_interruptible_lock_irq(crc->wq,
crtc_crc_data_count(crc),
crc->lock);
if (ret) {
spin_unlock_irq(&crc->lock);
return ret;
}
}
/* We know we have an entry to be read */
entry = &crc->entries[crc->tail];
if (count < LINE_LEN(crc->values_cnt)) {
spin_unlock_irq(&crc->lock);
return -EINVAL;
}
BUILD_BUG_ON_NOT_POWER_OF_2(DRM_CRC_ENTRIES_NR);
crc->tail = (crc->tail + 1) & (DRM_CRC_ENTRIES_NR - 1);
spin_unlock_irq(&crc->lock);
if (entry->has_frame_counter)
sprintf(buf, "0x%08x", entry->frame);
else
sprintf(buf, "XXXXXXXXXX");
for (i = 0; i < crc->values_cnt; i++)
sprintf(buf + 10 + i * 11, " 0x%08x", entry->crcs[i]);
sprintf(buf + 10 + crc->values_cnt * 11, "\n");
if (copy_to_user(user_buf, buf, LINE_LEN(crc->values_cnt)))
return -EFAULT;
return LINE_LEN(crc->values_cnt);
}
static const struct file_operations drm_crtc_crc_data_fops = {
.owner = THIS_MODULE,
.open = crtc_crc_open,
.read = crtc_crc_read,
.release = crtc_crc_release,
};
/**
* drm_debugfs_crtc_crc_add - Add files to debugfs for capture of frame CRCs
* @crtc: CRTC to whom the frames will belong
*
* Adds files to debugfs directory that allows userspace to control the
* generation of frame CRCs and to read them.
*
* Returns:
* Zero on success, error code on failure.
*/
int drm_debugfs_crtc_crc_add(struct drm_crtc *crtc)
{
struct dentry *crc_ent, *ent;
if (!crtc->funcs->set_crc_source)
return 0;
crc_ent = debugfs_create_dir("crc", crtc->debugfs_entry);
if (!crc_ent)
return -ENOMEM;
ent = debugfs_create_file("control", S_IRUGO, crc_ent, crtc,
&drm_crtc_crc_control_fops);
if (!ent)
goto error;
ent = debugfs_create_file("data", S_IRUGO, crc_ent, crtc,
&drm_crtc_crc_data_fops);
if (!ent)
goto error;
return 0;
error:
debugfs_remove_recursive(crc_ent);
return -ENOMEM;
}
/**
* drm_crtc_add_crc_entry - Add entry with CRC information for a frame
* @crtc: CRTC to which the frame belongs
* @has_frame: whether this entry has a frame number to go with
* @frame: number of the frame these CRCs are about
* @crcs: array of CRC values, with length matching #drm_crtc_crc.values_cnt
*
* For each frame, the driver polls the source of CRCs for new data and calls
* this function to add them to the buffer from where userspace reads.
*/
int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame,
uint32_t frame, uint32_t *crcs)
{
struct drm_crtc_crc *crc = &crtc->crc;
struct drm_crtc_crc_entry *entry;
int head, tail;
assert_spin_locked(&crc->lock);
/* Caller may not have noticed yet that userspace has stopped reading */
if (!crc->opened)
return -EINVAL;
head = crc->head;
tail = crc->tail;
if (CIRC_SPACE(head, tail, DRM_CRC_ENTRIES_NR) < 1) {
DRM_ERROR("Overflow of CRC buffer, userspace reads too slow.\n");
return -ENOBUFS;
}
entry = &crc->entries[head];
entry->frame = frame;
entry->has_frame_counter = has_frame;
memcpy(&entry->crcs, crcs, sizeof(*crcs) * crc->values_cnt);
head = (head + 1) & (DRM_CRC_ENTRIES_NR - 1);
crc->head = head;
return 0;
}
EXPORT_SYMBOL_GPL(drm_crtc_add_crc_entry);
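To tie the pieces together, a hypothetical driver-side sketch: userspace selects a source by writing dri/0/crtc-N/crc/control and reads entries from dri/0/crtc-N/crc/data (see the DOC comment above), while the driver implements the set_crc_source() hook and then feeds one entry per frame, for example from its frame-done interrupt. In this version drm_crtc_add_crc_entry() asserts that crtc->crc.lock is held by the caller; example_hw_read_crc() stands in for whatever register read the hardware needs.

static void example_frame_done(struct drm_crtc *crtc, uint32_t frame)
{
	uint32_t crcs[1];

	crcs[0] = example_hw_read_crc(crtc);	/* hypothetical hardware access */

	spin_lock(&crtc->crc.lock);
	drm_crtc_add_crc_entry(crtc, true, frame, crcs);
	spin_unlock(&crtc->crc.lock);
}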


@ -1282,20 +1282,20 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
void *data)
{
int i, j = 0, valid_extensions = 0;
u8 *block, *new;
u8 *edid, *new;
bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS);
if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
if ((edid = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
return NULL;
/* base block fetch */
for (i = 0; i < 4; i++) {
if (get_edid_block(data, block, 0, EDID_LENGTH))
if (get_edid_block(data, edid, 0, EDID_LENGTH))
goto out;
if (drm_edid_block_valid(block, 0, print_bad_edid,
if (drm_edid_block_valid(edid, 0, print_bad_edid,
&connector->edid_corrupt))
break;
if (i == 0 && drm_edid_is_zero(block, EDID_LENGTH)) {
if (i == 0 && drm_edid_is_zero(edid, EDID_LENGTH)) {
connector->null_edid_counter++;
goto carp;
}
@ -1304,24 +1304,22 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
goto carp;
/* if there's no extensions, we're done */
if (block[0x7e] == 0)
return (struct edid *)block;
if (edid[0x7e] == 0)
return (struct edid *)edid;
new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL);
new = krealloc(edid, (edid[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL);
if (!new)
goto out;
block = new;
edid = new;
for (j = 1; j <= edid[0x7e]; j++) {
u8 *block = edid + (valid_extensions + 1) * EDID_LENGTH;
for (j = 1; j <= block[0x7e]; j++) {
for (i = 0; i < 4; i++) {
if (get_edid_block(data,
block + (valid_extensions + 1) * EDID_LENGTH,
j, EDID_LENGTH))
if (get_edid_block(data, block, j, EDID_LENGTH))
goto out;
if (drm_edid_block_valid(block + (valid_extensions + 1)
* EDID_LENGTH, j,
print_bad_edid,
NULL)) {
if (drm_edid_block_valid(block, j,
print_bad_edid, NULL)) {
valid_extensions++;
break;
}
@ -1336,16 +1334,16 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
}
}
if (valid_extensions != block[0x7e]) {
block[EDID_LENGTH-1] += block[0x7e] - valid_extensions;
block[0x7e] = valid_extensions;
new = krealloc(block, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
if (valid_extensions != edid[0x7e]) {
edid[EDID_LENGTH-1] += edid[0x7e] - valid_extensions;
edid[0x7e] = valid_extensions;
new = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
if (!new)
goto out;
block = new;
edid = new;
}
return (struct edid *)block;
return (struct edid *)edid;
carp:
if (print_bad_edid) {
@ -1355,7 +1353,7 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
connector->bad_edid_counter++;
out:
kfree(block);
kfree(edid);
return NULL;
}
EXPORT_SYMBOL_GPL(drm_do_get_edid);


@ -176,20 +176,20 @@ struct drm_framebuffer *drm_fb_cma_create_with_funcs(struct drm_device *dev,
struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd,
const struct drm_framebuffer_funcs *funcs)
{
const struct drm_format_info *info;
struct drm_fb_cma *fb_cma;
struct drm_gem_cma_object *objs[4];
struct drm_gem_object *obj;
unsigned int hsub;
unsigned int vsub;
int ret;
int i;
hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format);
vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format);
info = drm_format_info(mode_cmd->pixel_format);
if (!info)
return ERR_PTR(-EINVAL);
for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) {
unsigned int width = mode_cmd->width / (i ? hsub : 1);
unsigned int height = mode_cmd->height / (i ? vsub : 1);
for (i = 0; i < info->num_planes; i++) {
unsigned int width = mode_cmd->width / (i ? info->hsub : 1);
unsigned int height = mode_cmd->height / (i ? info->vsub : 1);
unsigned int min_size;
obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
@ -200,7 +200,7 @@ struct drm_framebuffer *drm_fb_cma_create_with_funcs(struct drm_device *dev,
}
min_size = (height - 1) * mode_cmd->pitches[i]
+ width * drm_format_plane_cpp(mode_cmd->pixel_format, i)
+ width * info->cpp[i]
+ mode_cmd->offsets[i];
if (obj->size < min_size) {
@ -269,12 +269,15 @@ EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);
static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
{
struct drm_fb_cma *fb_cma = to_fb_cma(fb);
int i, n = drm_format_num_planes(fb->pixel_format);
const struct drm_format_info *info;
int i;
seq_printf(m, "fb: %dx%d@%4.4s\n", fb->width, fb->height,
(char *)&fb->pixel_format);
for (i = 0; i < n; i++) {
info = drm_format_info(fb->pixel_format);
for (i = 0; i < info->num_planes; i++) {
seq_printf(m, " %d: offset=%d pitch=%d, obj: ",
i, fb->offsets[i], fb->pitches[i]);
drm_gem_cma_describe(fb_cma->obj[i], m);
@ -557,7 +560,8 @@ EXPORT_SYMBOL_GPL(drm_fbdev_cma_init);
void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma)
{
drm_fb_helper_unregister_fbi(&fbdev_cma->fb_helper);
drm_fbdev_cma_defio_fini(fbdev_cma->fb_helper.fbdev);
if (fbdev_cma->fb_helper.fbdev)
drm_fbdev_cma_defio_fini(fbdev_cma->fb_helper.fbdev);
drm_fb_helper_release_fbi(&fbdev_cma->fb_helper);
if (fbdev_cma->fb) {


@ -367,9 +367,7 @@ static int restore_fbdev_mode_atomic(struct drm_fb_helper *fb_helper)
if (ret == -EDEADLK)
goto backoff;
if (ret != 0)
drm_atomic_state_free(state);
drm_atomic_state_put(state);
return ret;
backoff:
@ -394,7 +392,11 @@ static int restore_fbdev_mode(struct drm_fb_helper *fb_helper)
if (plane->type != DRM_PLANE_TYPE_PRIMARY)
drm_plane_force_disable(plane);
if (dev->mode_config.rotation_property) {
if (plane->rotation_property) {
drm_mode_plane_set_obj_prop(plane,
plane->rotation_property,
DRM_ROTATE_0);
} else if (dev->mode_config.rotation_property) {
drm_mode_plane_set_obj_prop(plane,
dev->mode_config.rotation_property,
DRM_ROTATE_0);
@ -1211,11 +1213,14 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
if (var->pixclock != 0 || in_dbg_master())
return -EINVAL;
/* Need to resize the fb object !!! */
if (var->bits_per_pixel > fb->bits_per_pixel ||
var->xres > fb->width || var->yres > fb->height ||
var->xres_virtual > fb->width || var->yres_virtual > fb->height) {
DRM_DEBUG("fb userspace requested width/height/bpp is greater than current fb "
/*
* Changes struct fb_var_screeninfo are currently not pushed back
* to KMS, hence fail if different settings are requested.
*/
if (var->bits_per_pixel != fb->bits_per_pixel ||
var->xres != fb->width || var->yres != fb->height ||
var->xres_virtual != fb->width || var->yres_virtual != fb->height) {
DRM_DEBUG("fb userspace requested width/height/bpp different than current fb "
"request %dx%d-%d (virtual %dx%d) > %dx%d-%d\n",
var->xres, var->yres, var->bits_per_pixel,
var->xres_virtual, var->yres_virtual,
@ -1361,16 +1366,13 @@ static int pan_display_atomic(struct fb_var_screeninfo *var,
info->var.xoffset = var->xoffset;
info->var.yoffset = var->yoffset;
fail:
drm_atomic_clean_old_fb(dev, plane_mask, ret);
if (ret == -EDEADLK)
goto backoff;
if (ret != 0)
drm_atomic_state_free(state);
drm_atomic_state_put(state);
return ret;
backoff:


@ -663,6 +663,10 @@ void drm_event_cancel_free(struct drm_device *dev,
list_del(&p->pending_link);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
if (p->fence)
fence_put(p->fence);
kfree(p);
}
EXPORT_SYMBOL(drm_event_cancel_free);


@ -102,83 +102,105 @@ char *drm_get_format_name(uint32_t format)
}
EXPORT_SYMBOL(drm_get_format_name);
/**
* drm_fb_get_bpp_depth - get the bpp/depth values for format
* @format: pixel format (DRM_FORMAT_*)
* @depth: storage for the depth value
* @bpp: storage for the bpp value
*
* This only supports RGB formats here for compat with code that doesn't use
* pixel formats directly yet.
/*
* Internal function to query information for a given format. See
* drm_format_info() for the public API.
*/
void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
int *bpp)
const struct drm_format_info *__drm_format_info(u32 format)
{
char *format_name;
static const struct drm_format_info formats[] = {
{ .format = DRM_FORMAT_C8, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_RGB332, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_BGR233, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_XRGB4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_XBGR4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_RGBX4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_BGRX4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_ARGB4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_ABGR4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_RGBA4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_BGRA4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_XRGB1555, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_XBGR1555, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_RGBX5551, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_BGRX5551, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_ARGB1555, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_ABGR1555, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_RGBA5551, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_BGRA5551, .depth = 15, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_RGB565, .depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_BGR565, .depth = 16, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_RGB888, .depth = 24, .num_planes = 1, .cpp = { 3, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_BGR888, .depth = 24, .num_planes = 1, .cpp = { 3, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_RGBX8888, .depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_BGRX8888, .depth = 24, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_XRGB2101010, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_XBGR2101010, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_RGBX1010102, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_BGRX1010102, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_ARGB2101010, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_RGBA1010102, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_BGRA1010102, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_RGBA8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_BGRA8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_YUV410, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 4 },
{ .format = DRM_FORMAT_YVU410, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 4 },
{ .format = DRM_FORMAT_YUV411, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 1 },
{ .format = DRM_FORMAT_YVU411, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 4, .vsub = 1 },
{ .format = DRM_FORMAT_YUV420, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 2 },
{ .format = DRM_FORMAT_YVU420, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 2 },
{ .format = DRM_FORMAT_YUV422, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 1 },
{ .format = DRM_FORMAT_YVU422, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 2, .vsub = 1 },
{ .format = DRM_FORMAT_YUV444, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_YVU444, .depth = 0, .num_planes = 3, .cpp = { 1, 1, 1 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_NV12, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 2 },
{ .format = DRM_FORMAT_NV21, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 2 },
{ .format = DRM_FORMAT_NV16, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 1 },
{ .format = DRM_FORMAT_NV61, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 2, .vsub = 1 },
{ .format = DRM_FORMAT_NV24, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_NV42, .depth = 0, .num_planes = 2, .cpp = { 1, 2, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_YUYV, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 },
{ .format = DRM_FORMAT_YVYU, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 },
{ .format = DRM_FORMAT_UYVY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 },
{ .format = DRM_FORMAT_VYUY, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 2, .vsub = 1 },
{ .format = DRM_FORMAT_AYUV, .depth = 0, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1 },
};
switch (format) {
case DRM_FORMAT_C8:
case DRM_FORMAT_RGB332:
case DRM_FORMAT_BGR233:
*depth = 8;
*bpp = 8;
break;
case DRM_FORMAT_XRGB1555:
case DRM_FORMAT_XBGR1555:
case DRM_FORMAT_RGBX5551:
case DRM_FORMAT_BGRX5551:
case DRM_FORMAT_ARGB1555:
case DRM_FORMAT_ABGR1555:
case DRM_FORMAT_RGBA5551:
case DRM_FORMAT_BGRA5551:
*depth = 15;
*bpp = 16;
break;
case DRM_FORMAT_RGB565:
case DRM_FORMAT_BGR565:
*depth = 16;
*bpp = 16;
break;
case DRM_FORMAT_RGB888:
case DRM_FORMAT_BGR888:
*depth = 24;
*bpp = 24;
break;
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_RGBX8888:
case DRM_FORMAT_BGRX8888:
*depth = 24;
*bpp = 32;
break;
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_XBGR2101010:
case DRM_FORMAT_RGBX1010102:
case DRM_FORMAT_BGRX1010102:
case DRM_FORMAT_ARGB2101010:
case DRM_FORMAT_ABGR2101010:
case DRM_FORMAT_RGBA1010102:
case DRM_FORMAT_BGRA1010102:
*depth = 30;
*bpp = 32;
break;
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_ABGR8888:
case DRM_FORMAT_RGBA8888:
case DRM_FORMAT_BGRA8888:
*depth = 32;
*bpp = 32;
break;
default:
format_name = drm_get_format_name(format);
DRM_DEBUG_KMS("unsupported pixel format %s\n", format_name);
kfree(format_name);
*depth = 0;
*bpp = 0;
break;
unsigned int i;
for (i = 0; i < ARRAY_SIZE(formats); ++i) {
if (formats[i].format == format)
return &formats[i];
}
return NULL;
}
EXPORT_SYMBOL(drm_fb_get_bpp_depth);
/**
* drm_format_info - query information for a given format
* @format: pixel format (DRM_FORMAT_*)
*
* The caller should only pass a supported pixel format to this function.
* Unsupported pixel formats will generate a warning in the kernel log.
*
* Returns:
* The instance of struct drm_format_info that describes the pixel format, or
* NULL if the format is unsupported.
*/
const struct drm_format_info *drm_format_info(u32 format)
{
const struct drm_format_info *info;
info = __drm_format_info(format);
WARN_ON(!info);
return info;
}
EXPORT_SYMBOL(drm_format_info);
/**
* drm_format_num_planes - get the number of planes for format
@ -189,28 +211,10 @@ EXPORT_SYMBOL(drm_fb_get_bpp_depth);
*/
int drm_format_num_planes(uint32_t format)
{
switch (format) {
case DRM_FORMAT_YUV410:
case DRM_FORMAT_YVU410:
case DRM_FORMAT_YUV411:
case DRM_FORMAT_YVU411:
case DRM_FORMAT_YUV420:
case DRM_FORMAT_YVU420:
case DRM_FORMAT_YUV422:
case DRM_FORMAT_YVU422:
case DRM_FORMAT_YUV444:
case DRM_FORMAT_YVU444:
return 3;
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
case DRM_FORMAT_NV16:
case DRM_FORMAT_NV61:
case DRM_FORMAT_NV24:
case DRM_FORMAT_NV42:
return 2;
default:
return 1;
}
const struct drm_format_info *info;
info = drm_format_info(format);
return info ? info->num_planes : 1;
}
EXPORT_SYMBOL(drm_format_num_planes);
@ -224,40 +228,13 @@ EXPORT_SYMBOL(drm_format_num_planes);
*/
int drm_format_plane_cpp(uint32_t format, int plane)
{
unsigned int depth;
int bpp;
const struct drm_format_info *info;
if (plane >= drm_format_num_planes(format))
info = drm_format_info(format);
if (!info || plane >= info->num_planes)
return 0;
switch (format) {
case DRM_FORMAT_YUYV:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_VYUY:
return 2;
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
case DRM_FORMAT_NV16:
case DRM_FORMAT_NV61:
case DRM_FORMAT_NV24:
case DRM_FORMAT_NV42:
return plane ? 2 : 1;
case DRM_FORMAT_YUV410:
case DRM_FORMAT_YVU410:
case DRM_FORMAT_YUV411:
case DRM_FORMAT_YVU411:
case DRM_FORMAT_YUV420:
case DRM_FORMAT_YVU420:
case DRM_FORMAT_YUV422:
case DRM_FORMAT_YVU422:
case DRM_FORMAT_YUV444:
case DRM_FORMAT_YVU444:
return 1;
default:
drm_fb_get_bpp_depth(format, &depth, &bpp);
return bpp >> 3;
}
return info->cpp[plane];
}
EXPORT_SYMBOL(drm_format_plane_cpp);
@ -271,28 +248,10 @@ EXPORT_SYMBOL(drm_format_plane_cpp);
*/
int drm_format_horz_chroma_subsampling(uint32_t format)
{
switch (format) {
case DRM_FORMAT_YUV411:
case DRM_FORMAT_YVU411:
case DRM_FORMAT_YUV410:
case DRM_FORMAT_YVU410:
return 4;
case DRM_FORMAT_YUYV:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_VYUY:
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
case DRM_FORMAT_NV16:
case DRM_FORMAT_NV61:
case DRM_FORMAT_YUV422:
case DRM_FORMAT_YVU422:
case DRM_FORMAT_YUV420:
case DRM_FORMAT_YVU420:
return 2;
default:
return 1;
}
const struct drm_format_info *info;
info = drm_format_info(format);
return info ? info->hsub : 1;
}
EXPORT_SYMBOL(drm_format_horz_chroma_subsampling);
@ -306,18 +265,10 @@ EXPORT_SYMBOL(drm_format_horz_chroma_subsampling);
*/
int drm_format_vert_chroma_subsampling(uint32_t format)
{
switch (format) {
case DRM_FORMAT_YUV410:
case DRM_FORMAT_YVU410:
return 4;
case DRM_FORMAT_YUV420:
case DRM_FORMAT_YVU420:
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
return 2;
default:
return 1;
}
const struct drm_format_info *info;
info = drm_format_info(format);
return info ? info->vsub : 1;
}
EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
@ -332,13 +283,16 @@ EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
*/
int drm_format_plane_width(int width, uint32_t format, int plane)
{
if (plane >= drm_format_num_planes(format))
const struct drm_format_info *info;
info = drm_format_info(format);
if (!info || plane >= info->num_planes)
return 0;
if (plane == 0)
return width;
return width / drm_format_horz_chroma_subsampling(format);
return width / info->hsub;
}
EXPORT_SYMBOL(drm_format_plane_width);
@ -353,12 +307,15 @@ EXPORT_SYMBOL(drm_format_plane_width);
*/
int drm_format_plane_height(int height, uint32_t format, int plane)
{
if (plane >= drm_format_num_planes(format))
const struct drm_format_info *info;
info = drm_format_info(format);
if (!info || plane >= info->num_planes)
return 0;
if (plane == 0)
return height;
return height / drm_format_vert_chroma_subsampling(format);
return height / info->vsub;
}
EXPORT_SYMBOL(drm_format_plane_height);

View File

@ -126,111 +126,33 @@ int drm_mode_addfb(struct drm_device *dev,
return 0;
}
static int format_check(const struct drm_mode_fb_cmd2 *r)
{
uint32_t format = r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN;
char *format_name;
switch (format) {
case DRM_FORMAT_C8:
case DRM_FORMAT_RGB332:
case DRM_FORMAT_BGR233:
case DRM_FORMAT_XRGB4444:
case DRM_FORMAT_XBGR4444:
case DRM_FORMAT_RGBX4444:
case DRM_FORMAT_BGRX4444:
case DRM_FORMAT_ARGB4444:
case DRM_FORMAT_ABGR4444:
case DRM_FORMAT_RGBA4444:
case DRM_FORMAT_BGRA4444:
case DRM_FORMAT_XRGB1555:
case DRM_FORMAT_XBGR1555:
case DRM_FORMAT_RGBX5551:
case DRM_FORMAT_BGRX5551:
case DRM_FORMAT_ARGB1555:
case DRM_FORMAT_ABGR1555:
case DRM_FORMAT_RGBA5551:
case DRM_FORMAT_BGRA5551:
case DRM_FORMAT_RGB565:
case DRM_FORMAT_BGR565:
case DRM_FORMAT_RGB888:
case DRM_FORMAT_BGR888:
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_RGBX8888:
case DRM_FORMAT_BGRX8888:
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_ABGR8888:
case DRM_FORMAT_RGBA8888:
case DRM_FORMAT_BGRA8888:
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_XBGR2101010:
case DRM_FORMAT_RGBX1010102:
case DRM_FORMAT_BGRX1010102:
case DRM_FORMAT_ARGB2101010:
case DRM_FORMAT_ABGR2101010:
case DRM_FORMAT_RGBA1010102:
case DRM_FORMAT_BGRA1010102:
case DRM_FORMAT_YUYV:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_VYUY:
case DRM_FORMAT_AYUV:
case DRM_FORMAT_NV12:
case DRM_FORMAT_NV21:
case DRM_FORMAT_NV16:
case DRM_FORMAT_NV61:
case DRM_FORMAT_NV24:
case DRM_FORMAT_NV42:
case DRM_FORMAT_YUV410:
case DRM_FORMAT_YVU410:
case DRM_FORMAT_YUV411:
case DRM_FORMAT_YVU411:
case DRM_FORMAT_YUV420:
case DRM_FORMAT_YVU420:
case DRM_FORMAT_YUV422:
case DRM_FORMAT_YVU422:
case DRM_FORMAT_YUV444:
case DRM_FORMAT_YVU444:
return 0;
default:
format_name = drm_get_format_name(r->pixel_format);
DRM_DEBUG_KMS("invalid pixel format %s\n", format_name);
kfree(format_name);
return -EINVAL;
}
}
static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
{
int ret, hsub, vsub, num_planes, i;
const struct drm_format_info *info;
int i;
ret = format_check(r);
if (ret) {
info = __drm_format_info(r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN);
if (!info) {
char *format_name = drm_get_format_name(r->pixel_format);
DRM_DEBUG_KMS("bad framebuffer format %s\n", format_name);
kfree(format_name);
return ret;
return -EINVAL;
}
hsub = drm_format_horz_chroma_subsampling(r->pixel_format);
vsub = drm_format_vert_chroma_subsampling(r->pixel_format);
num_planes = drm_format_num_planes(r->pixel_format);
if (r->width == 0 || r->width % hsub) {
if (r->width == 0 || r->width % info->hsub) {
DRM_DEBUG_KMS("bad framebuffer width %u\n", r->width);
return -EINVAL;
}
if (r->height == 0 || r->height % vsub) {
if (r->height == 0 || r->height % info->vsub) {
DRM_DEBUG_KMS("bad framebuffer height %u\n", r->height);
return -EINVAL;
}
for (i = 0; i < num_planes; i++) {
unsigned int width = r->width / (i != 0 ? hsub : 1);
unsigned int height = r->height / (i != 0 ? vsub : 1);
unsigned int cpp = drm_format_plane_cpp(r->pixel_format, i);
for (i = 0; i < info->num_planes; i++) {
unsigned int width = r->width / (i != 0 ? info->hsub : 1);
unsigned int height = r->height / (i != 0 ? info->vsub : 1);
unsigned int cpp = info->cpp[i];
if (!r->handles[i]) {
DRM_DEBUG_KMS("no buffer object handle for plane %d\n", i);
@ -273,7 +195,7 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
}
}
for (i = num_planes; i < 4; i++) {
for (i = info->num_planes; i < 4; i++) {
if (r->modifier[i]) {
DRM_DEBUG_KMS("non-zero modifier for unused plane %d\n", i);
return -EINVAL;

View File

@ -100,6 +100,9 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
int drm_debugfs_cleanup(struct drm_minor *minor);
int drm_debugfs_connector_add(struct drm_connector *connector);
void drm_debugfs_connector_remove(struct drm_connector *connector);
int drm_debugfs_crtc_add(struct drm_crtc *crtc);
void drm_debugfs_crtc_remove(struct drm_crtc *crtc);
int drm_debugfs_crtc_crc_add(struct drm_crtc *crtc);
#else
static inline int drm_debugfs_init(struct drm_minor *minor, int minor_id,
struct dentry *root)
@ -119,4 +122,17 @@ static inline int drm_debugfs_connector_add(struct drm_connector *connector)
static inline void drm_debugfs_connector_remove(struct drm_connector *connector)
{
}
static inline int drm_debugfs_crtc_add(struct drm_crtc *crtc)
{
return 0;
}
static inline void drm_debugfs_crtc_remove(struct drm_crtc *crtc)
{
}
static inline int drm_debugfs_crtc_crc_add(struct drm_crtc *crtc)
{
return 0;
}
#endif

View File

@ -952,8 +952,10 @@ static u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
u32 vblank_count;
unsigned int seq;
if (WARN_ON(pipe >= dev->num_crtcs))
if (WARN_ON(pipe >= dev->num_crtcs)) {
*vblanktime = (struct timeval) { 0 };
return 0;
}
do {
seq = read_seqbegin(&vblank->seqlock);

View File

@ -165,6 +165,7 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
unsigned int vfieldrate, hperiod;
int hdisplay_rnd, hmargin, vdisplay_rnd, vmargin, vsync;
int interlace;
u64 tmp;
/* allocate the drm_display_mode structure. If failure, we will
* return directly
@ -322,8 +323,11 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
drm_mode->vsync_end = drm_mode->vsync_start + vsync;
}
/* 15/13. Find pixel clock frequency (kHz for xf86) */
drm_mode->clock = drm_mode->htotal * HV_FACTOR * 1000 / hperiod;
drm_mode->clock -= drm_mode->clock % CVT_CLOCK_STEP;
tmp = drm_mode->htotal; /* perform intermediate calcs in u64 */
tmp *= HV_FACTOR * 1000;
do_div(tmp, hperiod);
tmp -= tmp % CVT_CLOCK_STEP;
drm_mode->clock = tmp;
/* 18/16. Find actual vertical frame frequency */
/* ignore - just set the mode flag for interlaced */
if (interlaced) {
@ -997,6 +1001,7 @@ bool drm_mode_equal_no_clocks_no_stereo(const struct drm_display_mode *mode1,
mode1->vsync_end == mode2->vsync_end &&
mode1->vtotal == mode2->vtotal &&
mode1->vscan == mode2->vscan &&
mode1->picture_aspect_ratio == mode2->picture_aspect_ratio &&
(mode1->flags & ~DRM_MODE_FLAG_3D_MASK) ==
(mode2->flags & ~DRM_MODE_FLAG_3D_MASK))
return true;
@ -1499,6 +1504,27 @@ void drm_mode_convert_to_umode(struct drm_mode_modeinfo *out,
out->vrefresh = in->vrefresh;
out->flags = in->flags;
out->type = in->type;
out->flags &= ~DRM_MODE_FLAG_PIC_AR_MASK;
switch (in->picture_aspect_ratio) {
case HDMI_PICTURE_ASPECT_4_3:
out->flags |= DRM_MODE_FLAG_PIC_AR_4_3;
break;
case HDMI_PICTURE_ASPECT_16_9:
out->flags |= DRM_MODE_FLAG_PIC_AR_16_9;
break;
case HDMI_PICTURE_ASPECT_64_27:
out->flags |= DRM_MODE_FLAG_PIC_AR_64_27;
break;
case HDMI_PICTURE_ASPECT_256_135:
out->flags |= DRM_MODE_FLAG_PIC_AR_256_135;
break;
case HDMI_PICTURE_ASPECT_RESERVED:
default:
out->flags |= DRM_MODE_FLAG_PIC_AR_NONE;
break;
}
strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
}
@ -1544,6 +1570,27 @@ int drm_mode_convert_umode(struct drm_display_mode *out,
strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
/* Clearing picture aspect ratio bits from out flags */
out->flags &= ~DRM_MODE_FLAG_PIC_AR_MASK;
switch (in->flags & DRM_MODE_FLAG_PIC_AR_MASK) {
case DRM_MODE_FLAG_PIC_AR_4_3:
out->picture_aspect_ratio = HDMI_PICTURE_ASPECT_4_3;
break;
case DRM_MODE_FLAG_PIC_AR_16_9:
out->picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9;
break;
case DRM_MODE_FLAG_PIC_AR_64_27:
out->picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27;
break;
case DRM_MODE_FLAG_PIC_AR_256_135:
out->picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135;
break;
default:
out->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
break;
}
out->status = drm_mode_validate_basic(out);
if (out->status != MODE_OK)
goto out;

View File

@ -70,8 +70,23 @@ EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head);
void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
const struct drm_mode_fb_cmd2 *mode_cmd)
{
const struct drm_format_info *info;
int i;
info = drm_format_info(mode_cmd->pixel_format);
if (!info || !info->depth) {
char *format_name = drm_get_format_name(mode_cmd->pixel_format);
DRM_DEBUG_KMS("non-RGB pixel format %s\n", format_name);
kfree(format_name);
fb->depth = 0;
fb->bits_per_pixel = 0;
} else {
fb->depth = info->depth;
fb->bits_per_pixel = info->cpp[0] * 8;
}
fb->width = mode_cmd->width;
fb->height = mode_cmd->height;
for (i = 0; i < 4; i++) {
@ -79,8 +94,6 @@ void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
fb->offsets[i] = mode_cmd->offsets[i];
fb->modifier[i] = mode_cmd->modifier[i];
}
drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth,
&fb->bits_per_pixel);
fb->pixel_format = mode_cmd->pixel_format;
fb->flags = mode_cmd->flags;
}

View File

@ -409,20 +409,16 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
struct drm_device *dev = obj->dev;
bool write = !!(op & ETNA_PREP_WRITE);
int ret;
unsigned long remain =
op & ETNA_PREP_NOSYNC ? 0 : etnaviv_timeout_to_jiffies(timeout);
long lret;
if (op & ETNA_PREP_NOSYNC) {
if (!reservation_object_test_signaled_rcu(etnaviv_obj->resv,
write))
return -EBUSY;
} else {
unsigned long remain = etnaviv_timeout_to_jiffies(timeout);
ret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv,
write, true, remain);
if (ret <= 0)
return ret == 0 ? -ETIMEDOUT : ret;
}
lret = reservation_object_wait_timeout_rcu(etnaviv_obj->resv,
write, true, remain);
if (lret < 0)
return lret;
else if (lret == 0)
return remain == 0 ? -EBUSY : -ETIMEDOUT;
if (etnaviv_obj->flags & ETNA_BO_CACHED) {
if (!etnaviv_obj->sgt) {

View File

@ -69,7 +69,7 @@ static void exynos_atomic_commit_complete(struct exynos_atomic_commit *commit)
drm_atomic_helper_cleanup_planes(dev, state);
drm_atomic_state_free(state);
drm_atomic_state_put(state);
spin_lock(&priv->lock);
priv->pending &= ~commit->crtcs;
@ -254,6 +254,7 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
drm_atomic_helper_swap_state(state, true);
drm_atomic_state_get(state);
if (nonblock)
schedule_work(&commit->work);
else

View File

@ -124,7 +124,7 @@ static int psbfb_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
unsigned long phys_addr = (unsigned long)dev_priv->stolen_base +
psbfb->gtt->offset;
page_num = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
page_num = vma_pages(vma);
address = (unsigned long)vmf->virtual_address - (vmf->pgoff << PAGE_SHIFT);
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
@ -236,22 +236,20 @@ static int psb_framebuffer_init(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct gtt_range *gt)
{
u32 bpp, depth;
const struct drm_format_info *info;
int ret;
drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
/*
* Reject unknown formats, YUV formats, and formats with more than
* 4 bytes per pixel.
*/
info = drm_format_info(mode_cmd->pixel_format);
if (!info || !info->depth || info->cpp[0] > 4)
return -EINVAL;
if (mode_cmd->pitches[0] & 63)
return -EINVAL;
switch (bpp) {
case 8:
case 16:
case 24:
case 32:
break;
default:
return -EINVAL;
}
drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd);
fb->gtt = gt;
ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs);
@ -298,7 +296,6 @@ static struct drm_framebuffer *psb_framebuffer_create
* psbfb_alloc - allocate frame buffer memory
* @dev: the DRM device
* @aligned_size: space needed
* @force: fall back to GEM buffers if need be
*
* Allocate the frame buffer. In the usual case we get a GTT range that
* is stolen memory backed and life is simple. If there isn't sufficient

View File

@ -76,6 +76,7 @@ static u32 __iomem *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
* psb_gtt_insert - put an object into the GTT
* @dev: our DRM device
* @r: our GTT range
* @resume: on resume
*
* Take our preallocated GTT range and insert the GEM object into
* the GTT. This is protected via the gtt mutex which the caller
@ -321,6 +322,7 @@ void psb_gtt_unpin(struct gtt_range *gt)
* @len: length (bytes) of address space required
* @name: resource name
* @backed: resource should be backed by stolen pages
* @align: requested alignment
*
* Ask the kernel core to find us a suitable range of addresses
* to use for a GTT mapping.

View File

@ -3941,10 +3941,9 @@ static void hsw_trans_edp_pipe_A_crc_wa(struct drm_i915_private *dev_priv,
ret = drm_atomic_commit(state);
out:
drm_modeset_unlock_all(dev);
WARN(ret, "Toggling workaround to %i returns %i\n", enable, ret);
if (ret)
drm_atomic_state_free(state);
drm_modeset_unlock_all(dev);
drm_atomic_state_put(state);
}
static int ivb_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,

View File

@ -142,8 +142,9 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
intel_state->clip.y2 =
crtc_state->base.enable ? crtc_state->pipe_src_h : 0;
if (state->fb && intel_rotation_90_or_270(state->rotation)) {
if (state->fb && drm_rotation_90_or_270(state->rotation)) {
char *format_name;
if (!(state->fb->modifier[0] == I915_FORMAT_MOD_Y_TILED ||
state->fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED)) {
DRM_DEBUG_KMS("Y/Yf tiling required for 90/270!\n");

View File

@ -2139,7 +2139,7 @@ intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
const struct drm_framebuffer *fb,
unsigned int rotation)
{
if (intel_rotation_90_or_270(rotation)) {
if (drm_rotation_90_or_270(rotation)) {
*view = i915_ggtt_view_rotated;
view->params.rotated = to_intel_framebuffer(fb)->rot_info;
} else {
@ -2260,7 +2260,7 @@ void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane,
unsigned int rotation)
{
if (intel_rotation_90_or_270(rotation))
if (drm_rotation_90_or_270(rotation))
return to_intel_framebuffer(fb)->rotated[plane].pitch;
else
return fb->pitches[plane];
@ -2296,7 +2296,7 @@ void intel_add_fb_offsets(int *x, int *y,
const struct intel_framebuffer *intel_fb = to_intel_framebuffer(state->base.fb);
unsigned int rotation = state->base.rotation;
if (intel_rotation_90_or_270(rotation)) {
if (drm_rotation_90_or_270(rotation)) {
*x += intel_fb->rotated[plane].x;
*y += intel_fb->rotated[plane].y;
} else {
@ -2360,7 +2360,7 @@ static u32 intel_adjust_tile_offset(int *x, int *y,
intel_tile_dims(dev_priv, &tile_width, &tile_height,
fb->modifier[plane], cpp);
if (intel_rotation_90_or_270(rotation)) {
if (drm_rotation_90_or_270(rotation)) {
pitch_tiles = pitch / tile_height;
swap(tile_width, tile_height);
} else {
@ -2416,7 +2416,7 @@ static u32 _intel_compute_tile_offset(const struct drm_i915_private *dev_priv,
intel_tile_dims(dev_priv, &tile_width, &tile_height,
fb_modifier, cpp);
if (intel_rotation_90_or_270(rotation)) {
if (drm_rotation_90_or_270(rotation)) {
pitch_tiles = pitch / tile_height;
swap(tile_width, tile_height);
} else {
@ -2976,7 +2976,7 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
int ret;
/* Rotate src coordinates to match rotated GTT view */
if (intel_rotation_90_or_270(rotation))
if (drm_rotation_90_or_270(rotation))
drm_rect_rotate(&plane_state->base.src,
fb->width, fb->height, DRM_ROTATE_270);
@ -3276,7 +3276,7 @@ u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane,
* The stride is either expressed as a multiple of 64 bytes chunks for
* linear buffers or in number of tiles for tiled buffers.
*/
if (intel_rotation_90_or_270(rotation)) {
if (drm_rotation_90_or_270(rotation)) {
int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
stride /= intel_tile_height(dev_priv, fb->modifier[0], cpp);
@ -3584,7 +3584,7 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
return;
err:
drm_atomic_state_free(state);
drm_atomic_state_put(state);
}
void intel_finish_reset(struct drm_i915_private *dev_priv)
@ -3646,6 +3646,8 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
intel_hpd_init(dev_priv);
}
if (state)
drm_atomic_state_put(state);
drm_modeset_drop_locks(ctx);
drm_modeset_acquire_fini(ctx);
mutex_unlock(&dev->mode_config.mutex);
@ -4667,7 +4669,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
to_intel_crtc(crtc_state->base.crtc);
int need_scaling;
need_scaling = intel_rotation_90_or_270(rotation) ?
need_scaling = drm_rotation_90_or_270(rotation) ?
(src_h != dst_w || src_w != dst_h):
(src_w != dst_w || src_h != dst_h);
@ -6885,7 +6887,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
dev_priv->display.crtc_disable(crtc_state, state);
drm_atomic_state_free(state);
drm_atomic_state_put(state);
DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
crtc->base.id, crtc->name);
@ -11270,9 +11272,14 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
return true;
fail:
drm_atomic_state_free(state);
drm_atomic_state_free(restore_state);
restore_state = state = NULL;
if (state) {
drm_atomic_state_put(state);
state = NULL;
}
if (restore_state) {
drm_atomic_state_put(restore_state);
restore_state = NULL;
}
if (ret == -EDEADLK) {
drm_modeset_backoff(ctx);
@ -11300,10 +11307,9 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
return;
ret = drm_atomic_commit(state);
if (ret) {
if (ret)
DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
drm_atomic_state_free(state);
}
drm_atomic_state_put(state);
}
static int i9xx_pll_refclk(struct drm_device *dev,
@ -12371,8 +12377,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
goto retry;
}
if (ret)
drm_atomic_state_free(state);
drm_atomic_state_put(state);
if (ret == 0 && event) {
spin_lock_irq(&dev->event_lock);
@ -14457,7 +14462,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_commit_cleanup_done(state);
drm_atomic_state_free(state);
drm_atomic_state_put(state);
/* As one of the primary mmio accessors, KMS has a high likelihood
* of triggering bugs in unclaimed access. After we finish
@ -14540,6 +14545,7 @@ static int intel_atomic_commit(struct drm_device *dev,
intel_shared_dpll_commit(state);
intel_atomic_track_fbs(state);
drm_atomic_state_get(state);
if (nonblock)
queue_work(system_unbound_wq, &state->commit_work);
else
@ -14581,9 +14587,8 @@ void intel_crtc_restore_mode(struct drm_crtc *crtc)
goto retry;
}
if (ret)
out:
drm_atomic_state_free(state);
drm_atomic_state_put(state);
}
/*
@ -14901,6 +14906,7 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
struct intel_plane *primary = NULL;
struct intel_plane_state *state = NULL;
const uint32_t *intel_primary_formats;
unsigned int supported_rotations;
unsigned int num_formats;
int ret;
@ -14973,8 +14979,21 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
if (ret)
goto fail;
if (INTEL_INFO(dev)->gen >= 4)
intel_create_rotation_property(dev, primary);
if (INTEL_GEN(dev) >= 9) {
supported_rotations =
DRM_ROTATE_0 | DRM_ROTATE_90 |
DRM_ROTATE_180 | DRM_ROTATE_270;
} else if (INTEL_GEN(dev) >= 4) {
supported_rotations =
DRM_ROTATE_0 | DRM_ROTATE_180;
} else {
supported_rotations = DRM_ROTATE_0;
}
if (INTEL_GEN(dev) >= 4)
drm_plane_create_rotation_property(&primary->base,
DRM_ROTATE_0,
supported_rotations);
drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);
@ -14987,24 +15006,6 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
return NULL;
}
void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *plane)
{
if (!dev->mode_config.rotation_property) {
unsigned long flags = DRM_ROTATE_0 |
DRM_ROTATE_180;
if (INTEL_INFO(dev)->gen >= 9)
flags |= DRM_ROTATE_90 | DRM_ROTATE_270;
dev->mode_config.rotation_property =
drm_mode_create_rotation_property(dev, flags);
}
if (dev->mode_config.rotation_property)
drm_object_attach_property(&plane->base.base,
dev->mode_config.rotation_property,
plane->base.state->rotation);
}
static int
intel_check_cursor_plane(struct drm_plane *plane,
struct intel_crtc_state *crtc_state,
@ -15131,17 +15132,11 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
if (ret)
goto fail;
if (INTEL_INFO(dev)->gen >= 4) {
if (!dev->mode_config.rotation_property)
dev->mode_config.rotation_property =
drm_mode_create_rotation_property(dev,
DRM_ROTATE_0 |
DRM_ROTATE_180);
if (dev->mode_config.rotation_property)
drm_object_attach_property(&cursor->base.base,
dev->mode_config.rotation_property,
state->base.rotation);
}
if (INTEL_GEN(dev) >= 4)
drm_plane_create_rotation_property(&cursor->base,
DRM_ROTATE_0,
DRM_ROTATE_0 |
DRM_ROTATE_180);
if (INTEL_INFO(dev)->gen >= 9)
state->scaler_id = -1;
@ -16314,7 +16309,7 @@ static void sanitize_watermarks(struct drm_device *dev)
* BIOS-programmed watermarks untouched and hope for the best.
*/
WARN(true, "Could not determine valid watermarks for inherited state\n");
goto fail;
goto put_state;
}
/* Write calculated watermark values back */
@ -16325,7 +16320,8 @@ static void sanitize_watermarks(struct drm_device *dev)
dev_priv->display.optimize_watermarks(cs);
}
drm_atomic_state_free(state);
put_state:
drm_atomic_state_put(state);
fail:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
@ -16963,10 +16959,9 @@ void intel_display_resume(struct drm_device *dev)
drm_modeset_acquire_fini(&ctx);
mutex_unlock(&dev->mode_config.mutex);
if (ret) {
if (ret)
DRM_ERROR("Restoring old state failed with %i\n", ret);
drm_atomic_state_free(state);
}
drm_atomic_state_put(state);
}
void intel_modeset_gem_init(struct drm_device *dev)

View File

@ -1285,15 +1285,6 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
unsigned int intel_tile_height(const struct drm_i915_private *dev_priv,
uint64_t fb_modifier, unsigned int cpp);
static inline bool
intel_rotation_90_or_270(unsigned int rotation)
{
return rotation & (DRM_ROTATE_90 | DRM_ROTATE_270);
}
void intel_create_rotation_property(struct drm_device *dev,
struct intel_plane *plane);
void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe);

View File

@ -84,7 +84,7 @@ static void intel_fbc_get_plane_source_size(struct intel_fbc_state_cache *cache,
{
int w, h;
if (intel_rotation_90_or_270(cache->plane.rotation)) {
if (drm_rotation_90_or_270(cache->plane.rotation)) {
w = cache->plane.src_h;
h = cache->plane.src_w;
} else {

View File

@ -3173,7 +3173,7 @@ skl_plane_downscale_amount(const struct intel_plane_state *pstate)
src_h = drm_rect_height(&pstate->base.src);
dst_w = drm_rect_width(&pstate->base.dst);
dst_h = drm_rect_height(&pstate->base.dst);
if (intel_rotation_90_or_270(pstate->base.rotation))
if (drm_rotation_90_or_270(pstate->base.rotation))
swap(dst_w, dst_h);
downscale_h = max(src_h / dst_h, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
@ -3204,7 +3204,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
width = drm_rect_width(&intel_pstate->base.src) >> 16;
height = drm_rect_height(&intel_pstate->base.src) >> 16;
if (intel_rotation_90_or_270(pstate->rotation))
if (drm_rotation_90_or_270(pstate->rotation))
swap(width, height);
/* for planar format */
@ -3304,7 +3304,7 @@ skl_ddb_min_alloc(const struct drm_plane_state *pstate,
src_w = drm_rect_width(&intel_pstate->base.src) >> 16;
src_h = drm_rect_height(&intel_pstate->base.src) >> 16;
if (intel_rotation_90_or_270(pstate->rotation))
if (drm_rotation_90_or_270(pstate->rotation))
swap(src_w, src_h);
/* Halve UV plane width and height for NV12 */
@ -3318,7 +3318,7 @@ skl_ddb_min_alloc(const struct drm_plane_state *pstate,
else
plane_bpp = drm_format_plane_cpp(fb->pixel_format, 0);
if (intel_rotation_90_or_270(pstate->rotation)) {
if (drm_rotation_90_or_270(pstate->rotation)) {
switch (plane_bpp) {
case 1:
min_scanlines = 32;
@ -3562,13 +3562,13 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
width = drm_rect_width(&intel_pstate->base.src) >> 16;
height = drm_rect_height(&intel_pstate->base.src) >> 16;
if (intel_rotation_90_or_270(pstate->rotation))
if (drm_rotation_90_or_270(pstate->rotation))
swap(width, height);
cpp = drm_format_plane_cpp(fb->pixel_format, 0);
plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);
if (intel_rotation_90_or_270(pstate->rotation)) {
if (drm_rotation_90_or_270(pstate->rotation)) {
int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
drm_format_plane_cpp(fb->pixel_format, 1) :
drm_format_plane_cpp(fb->pixel_format, 0);

View File

@ -987,9 +987,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
drm_modeset_backoff(&ctx);
}
if (ret)
drm_atomic_state_free(state);
drm_atomic_state_put(state);
out:
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
@ -1046,6 +1044,7 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
struct intel_plane_state *state = NULL;
unsigned long possible_crtcs;
const uint32_t *plane_formats;
unsigned int supported_rotations;
int num_plane_formats;
int ret;
@ -1121,6 +1120,15 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
goto fail;
}
if (INTEL_GEN(dev) >= 9) {
supported_rotations =
DRM_ROTATE_0 | DRM_ROTATE_90 |
DRM_ROTATE_180 | DRM_ROTATE_270;
} else {
supported_rotations =
DRM_ROTATE_0 | DRM_ROTATE_180;
}
intel_plane->pipe = pipe;
intel_plane->plane = plane;
intel_plane->frontbuffer_bit = INTEL_FRONTBUFFER_SPRITE(pipe, plane);
@ -1143,7 +1151,9 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
if (ret)
goto fail;
intel_create_rotation_property(dev, intel_plane);
drm_plane_create_rotation_property(&intel_plane->base,
DRM_ROTATE_0,
supported_rotations);
drm_plane_helper_add(&intel_plane->base, &intel_plane_helper_funcs);

View File

@ -83,7 +83,7 @@ static void mtk_atomic_complete(struct mtk_drm_private *private,
drm_atomic_helper_wait_for_vblanks(drm, state);
drm_atomic_helper_cleanup_planes(drm, state);
drm_atomic_state_free(state);
drm_atomic_state_put(state);
}
static void mtk_atomic_work(struct work_struct *work)
@ -110,6 +110,7 @@ static int mtk_atomic_commit(struct drm_device *drm,
drm_atomic_helper_swap_state(state, true);
drm_atomic_state_get(state);
if (async)
mtk_atomic_schedule(private, state);
else

View File

@ -141,7 +141,7 @@ static void complete_commit(struct msm_commit *c, bool async)
kms->funcs->complete_commit(kms, state);
drm_atomic_state_free(state);
drm_atomic_state_put(state);
commit_destroy(c);
}
@ -256,6 +256,7 @@ int msm_atomic_commit(struct drm_device *dev,
* current layout.
*/
drm_atomic_state_get(state);
if (nonblock) {
queue_work(priv->atomic_wq, &c->work);
return 0;

View File

@ -861,6 +861,7 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
struct nouveau_bo *nvbo;
bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
bool write = !!(req->flags & NOUVEAU_GEM_CPU_PREP_WRITE);
long lret;
int ret;
gem = drm_gem_object_lookup(file_priv, req->handle);
@ -868,19 +869,15 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
return -ENOENT;
nvbo = nouveau_gem_object(gem);
if (no_wait)
ret = reservation_object_test_signaled_rcu(nvbo->bo.resv, write) ? 0 : -EBUSY;
else {
long lret;
lret = reservation_object_wait_timeout_rcu(nvbo->bo.resv, write, true,
no_wait ? 0 : 30 * HZ);
if (!lret)
ret = -EBUSY;
else if (lret > 0)
ret = 0;
else
ret = lret;
lret = reservation_object_wait_timeout_rcu(nvbo->bo.resv, write, true, 30 * HZ);
if (!lret)
ret = -EBUSY;
else if (lret > 0)
ret = 0;
else
ret = lret;
}
nouveau_bo_sync_for_cpu(nvbo);
drm_gem_object_unreference_unlocked(gem);

View File

@ -438,13 +438,14 @@ static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
}
}
static bool omap_crtc_is_plane_prop(struct drm_device *dev,
static bool omap_crtc_is_plane_prop(struct drm_crtc *crtc,
struct drm_property *property)
{
struct drm_device *dev = crtc->dev;
struct omap_drm_private *priv = dev->dev_private;
return property == priv->zorder_prop ||
property == dev->mode_config.rotation_property;
property == crtc->primary->rotation_property;
}
static int omap_crtc_atomic_set_property(struct drm_crtc *crtc,
@ -452,9 +453,7 @@ static int omap_crtc_atomic_set_property(struct drm_crtc *crtc,
struct drm_property *property,
uint64_t val)
{
struct drm_device *dev = crtc->dev;
if (omap_crtc_is_plane_prop(dev, property)) {
if (omap_crtc_is_plane_prop(crtc, property)) {
struct drm_plane_state *plane_state;
struct drm_plane *plane = crtc->primary;
@ -479,9 +478,7 @@ static int omap_crtc_atomic_get_property(struct drm_crtc *crtc,
struct drm_property *property,
uint64_t *val)
{
struct drm_device *dev = crtc->dev;
if (omap_crtc_is_plane_prop(dev, property)) {
if (omap_crtc_is_plane_prop(crtc, property)) {
/*
* Delegate property get to the primary plane. The
* drm_atomic_plane_get_property() function isn't exported, but

View File

@ -105,7 +105,7 @@ static void omap_atomic_complete(struct omap_atomic_state_commit *commit)
dispc_runtime_put();
drm_atomic_state_free(old_state);
drm_atomic_state_put(old_state);
/* Complete the commit, wake up any waiter. */
spin_lock(&priv->commit.lock);
@ -176,6 +176,7 @@ static int omap_atomic_commit(struct drm_device *dev,
/* Swap the state, this is the point of no return. */
drm_atomic_helper_swap_state(state, true);
drm_atomic_state_get(state);
if (nonblock)
schedule_work(&commit->work);
else
@ -292,16 +293,6 @@ static int omap_modeset_init_properties(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
if (priv->has_dmm) {
dev->mode_config.rotation_property =
drm_mode_create_rotation_property(dev,
DRM_ROTATE_0 | DRM_ROTATE_90 |
DRM_ROTATE_180 | DRM_ROTATE_270 |
DRM_REFLECT_X | DRM_REFLECT_Y);
if (!dev->mode_config.rotation_property)
return -ENOMEM;
}
priv->zorder_prop = drm_property_create_range(dev, 0, "zorder", 0, 3);
if (!priv->zorder_prop)
return -ENOMEM;
@ -752,22 +743,32 @@ static void dev_lastclose(struct drm_device *dev)
DBG("lastclose: dev=%p", dev);
if (dev->mode_config.rotation_property) {
/* need to restore default rotation state.. not sure
* if there is a cleaner way to restore properties to
* default state? Maybe a flag that properties should
* automatically be restored to default state on
* lastclose?
*/
for (i = 0; i < priv->num_crtcs; i++) {
drm_object_property_set_value(&priv->crtcs[i]->base,
dev->mode_config.rotation_property, 0);
}
/* need to restore default rotation state.. not sure
* if there is a cleaner way to restore properties to
* default state? Maybe a flag that properties should
* automatically be restored to default state on
* lastclose?
*/
for (i = 0; i < priv->num_crtcs; i++) {
struct drm_crtc *crtc = priv->crtcs[i];
for (i = 0; i < priv->num_planes; i++) {
drm_object_property_set_value(&priv->planes[i]->base,
dev->mode_config.rotation_property, 0);
}
if (!crtc->primary->rotation_property)
continue;
drm_object_property_set_value(&crtc->base,
crtc->primary->rotation_property,
DRM_ROTATE_0);
}
for (i = 0; i < priv->num_planes; i++) {
struct drm_plane *plane = priv->planes[i];
if (!plane->rotation_property)
continue;
drm_object_property_set_value(&plane->base,
plane->rotation_property,
DRM_ROTATE_0);
}
if (priv->fbdev) {

View File

@ -108,16 +108,12 @@ static void omap_plane_atomic_update(struct drm_plane *plane,
win.src_x = state->src_x >> 16;
win.src_y = state->src_y >> 16;
switch (state->rotation & DRM_ROTATE_MASK) {
case DRM_ROTATE_90:
case DRM_ROTATE_270:
if (drm_rotation_90_or_270(state->rotation)) {
win.src_w = state->src_h >> 16;
win.src_h = state->src_w >> 16;
break;
default:
} else {
win.src_w = state->src_w >> 16;
win.src_h = state->src_h >> 16;
break;
}
/* update scanout: */
@ -215,9 +211,17 @@ void omap_plane_install_properties(struct drm_plane *plane,
struct omap_drm_private *priv = dev->dev_private;
if (priv->has_dmm) {
struct drm_property *prop = dev->mode_config.rotation_property;
if (!plane->rotation_property)
drm_plane_create_rotation_property(plane,
DRM_ROTATE_0,
DRM_ROTATE_0 | DRM_ROTATE_90 |
DRM_ROTATE_180 | DRM_ROTATE_270 |
DRM_REFLECT_X | DRM_REFLECT_Y);
drm_object_attach_property(obj, prop, 0);
/* Attach the rotation property also to the crtc object */
if (plane->rotation_property && obj != &plane->base)
drm_object_attach_property(obj, plane->rotation_property,
DRM_ROTATE_0);
}
drm_object_attach_property(obj, priv->zorder_prop, 0);

View File

@ -89,13 +89,13 @@ static struct fb_ops radeonfb_ops = {
};
int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled)
int radeon_align_pitch(struct radeon_device *rdev, int width, int cpp, bool tiled)
{
int aligned = width;
int align_large = (ASIC_IS_AVIVO(rdev)) || tiled;
int pitch_mask = 0;
switch (bpp / 8) {
switch (cpp) {
case 1:
pitch_mask = align_large ? 255 : 127;
break;
@ -110,7 +110,7 @@ int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tile
aligned += pitch_mask;
aligned &= ~pitch_mask;
return aligned;
return aligned * cpp;
}
static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
@ -139,13 +139,13 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
int ret;
int aligned_size, size;
int height = mode_cmd->height;
u32 bpp, depth;
u32 cpp;
drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
cpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0);
/* need to align pitch with crtc limits */
mode_cmd->pitches[0] = radeon_align_pitch(rdev, mode_cmd->width, bpp,
fb_tiled) * ((bpp + 1) / 8);
mode_cmd->pitches[0] = radeon_align_pitch(rdev, mode_cmd->width, cpp,
fb_tiled);
if (rdev->family >= CHIP_R600)
height = ALIGN(mode_cmd->height, 8);
@ -165,11 +165,11 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
tiling_flags = RADEON_TILING_MACRO;
#ifdef __BIG_ENDIAN
switch (bpp) {
case 32:
switch (cpp) {
case 4:
tiling_flags |= RADEON_TILING_SWAP_32BIT;
break;
case 16:
case 2:
tiling_flags |= RADEON_TILING_SWAP_16BIT;
default:
break;

View File

@ -745,7 +745,8 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
uint32_t handle;
int r;
args->pitch = radeon_align_pitch(rdev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
args->pitch = radeon_align_pitch(rdev, args->width,
DIV_ROUND_UP(args->bpp, 8), 0);
args->size = args->pitch * args->height;
args->size = ALIGN(args->size, PAGE_SIZE);

View File

@ -264,7 +264,7 @@ static void rcar_du_atomic_complete(struct rcar_du_commit *commit)
drm_atomic_helper_cleanup_planes(dev, old_state);
drm_atomic_state_free(old_state);
drm_atomic_state_put(old_state);
/* Complete the commit, wake up any waiter. */
spin_lock(&rcdu->commit.wait.lock);
@ -330,6 +330,7 @@ static int rcar_du_atomic_commit(struct drm_device *dev,
/* Swap the state, this is the point of no return. */
drm_atomic_helper_swap_state(state, true);
drm_atomic_state_get(state);
if (nonblock)
schedule_work(&commit->work);
else

View File

@ -1,7 +1,6 @@
config DRM_ROCKCHIP
tristate "DRM Support for Rockchip"
depends on DRM && ROCKCHIP_IOMMU
depends on RESET_CONTROLLER
select DRM_GEM_CMA_HELPER
select DRM_KMS_HELPER
select DRM_PANEL

View File

@ -1004,6 +1004,7 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_
kvb_addr = memdup_user(cmdbuf->vb_addr, cmdbuf->vb_size);
if (IS_ERR(kvb_addr)) {
ret = PTR_ERR(kvb_addr);
kvb_addr = NULL;
goto done;
}
cmdbuf->vb_addr = kvb_addr;

View File

@ -184,7 +184,7 @@ static void sti_atomic_complete(struct sti_private *private,
drm_atomic_helper_wait_for_vblanks(drm, state);
drm_atomic_helper_cleanup_planes(drm, state);
drm_atomic_state_free(state);
drm_atomic_state_put(state);
}
static void sti_atomic_work(struct work_struct *work)
@ -217,6 +217,7 @@ static int sti_atomic_commit(struct drm_device *drm,
drm_atomic_helper_swap_state(state, true);
drm_atomic_state_get(state);
if (nonblock)
sti_atomic_schedule(private, state);
else

View File

@ -3,7 +3,6 @@ config DRM_TEGRA
depends on ARCH_TEGRA || (ARM && COMPILE_TEST)
depends on COMMON_CLK
depends on DRM
depends on RESET_CONTROLLER
select DRM_KMS_HELPER
select DRM_MIPI_DSI
select DRM_PANEL

View File

@ -63,7 +63,7 @@ static void tegra_atomic_complete(struct tegra_drm *tegra,
drm_atomic_helper_wait_for_vblanks(drm, state);
drm_atomic_helper_cleanup_planes(drm, state);
drm_atomic_state_free(state);
drm_atomic_state_put(state);
}
static void tegra_atomic_work(struct work_struct *work)
@ -96,6 +96,7 @@ static int tegra_atomic_commit(struct drm_device *drm,
drm_atomic_helper_swap_state(state, true);
drm_atomic_state_get(state);
if (nonblock)
tegra_atomic_schedule(tegra, state);
else

View File

@ -72,16 +72,14 @@ static void set_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_gem_cma_object *gem;
unsigned int depth, bpp;
dma_addr_t start, end;
u64 dma_base_and_ceiling;
drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp);
gem = drm_fb_cma_get_gem_obj(fb, 0);
start = gem->paddr + fb->offsets[0] +
crtc->y * fb->pitches[0] +
crtc->x * bpp / 8;
crtc->x * drm_format_plane_cpp(fb->pixel_format, 0);
end = start + (crtc->mode.vdisplay * fb->pitches[0]);
@ -461,16 +459,16 @@ static void tilcdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
if (info->tft_alt_mode)
reg |= LCDC_TFT_ALT_ENABLE;
if (priv->rev == 2) {
unsigned int depth, bpp;
drm_fb_get_bpp_depth(fb->pixel_format, &depth, &bpp);
switch (bpp) {
case 16:
switch (fb->pixel_format) {
case DRM_FORMAT_BGR565:
case DRM_FORMAT_RGB565:
break;
case 32:
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_XRGB8888:
reg |= LCDC_V2_TFT_24BPP_UNPACK;
/* fallthrough */
case 24:
case DRM_FORMAT_BGR888:
case DRM_FORMAT_RGB888:
reg |= LCDC_V2_TFT_24BPP_MODE;
break;
default:

View File

@ -143,8 +143,6 @@ static int tilcdc_commit(struct drm_device *dev,
drm_atomic_helper_cleanup_planes(dev, state);
drm_atomic_state_free(state);
return 0;
}

View File

@ -39,7 +39,7 @@ static int tilcdc_plane_atomic_check(struct drm_plane *plane,
{
struct drm_crtc_state *crtc_state;
struct drm_plane_state *old_state = plane->state;
unsigned int depth, bpp;
unsigned int pitch;
if (!state->crtc)
return 0;
@ -68,8 +68,9 @@ static int tilcdc_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
}
drm_fb_get_bpp_depth(state->fb->pixel_format, &depth, &bpp);
if (state->fb->pitches[0] != crtc_state->mode.hdisplay * bpp / 8) {
pitch = crtc_state->mode.hdisplay *
drm_format_plane_cpp(state->fb->pixel_format, 0);
if (state->fb->pitches[0] != pitch) {
dev_err(plane->dev->dev,
"Invalid pitch: fb and crtc widths must be the same");
return -EINVAL;

View File

@ -61,7 +61,7 @@ vc4_atomic_complete_commit(struct vc4_commit *c)
drm_atomic_helper_cleanup_planes(dev, state);
drm_atomic_state_free(state);
drm_atomic_state_put(state);
up(&vc4->async_modeset);
@ -173,6 +173,7 @@ static int vc4_atomic_commit(struct drm_device *dev,
* current layout.
*/
drm_atomic_state_get(state);
if (nonblock) {
vc4_queue_seqno_cb(dev, &c->cb, wait_seqno,
vc4_atomic_complete_commit_seqno_cb);

View File

@ -1,10 +1,10 @@
config DRM_VIRTIO_GPU
tristate "Virtio GPU driver"
depends on DRM && VIRTIO
select DRM_KMS_HELPER
select DRM_TTM
select DRM_KMS_HELPER
select DRM_TTM
help
This is the virtual GPU driver for virtio. It can be used with
QEMU based VMMs (like KVM or Xen).
QEMU based VMMs (like KVM or Xen).
If unsure say M.

View File

@ -980,14 +980,22 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
struct vmw_dma_buffer *bo = NULL;
struct ttm_base_object *user_obj;
struct drm_mode_fb_cmd mode_cmd;
const struct drm_format_info *info;
int ret;
info = drm_format_info(mode_cmd2->pixel_format);
if (!info || !info->depth) {
DRM_ERROR("Unsupported framebuffer format %s\n",
drm_get_format_name(mode_cmd2->pixel_format));
return ERR_PTR(-EINVAL);
}
mode_cmd.width = mode_cmd2->width;
mode_cmd.height = mode_cmd2->height;
mode_cmd.pitch = mode_cmd2->pitches[0];
mode_cmd.handle = mode_cmd2->handles[0];
drm_fb_get_bpp_depth(mode_cmd2->pixel_format, &mode_cmd.depth,
&mode_cmd.bpp);
mode_cmd.depth = info->depth;
mode_cmd.bpp = info->cpp[0] * 8;
/**
* This code should be conditioned on Screen Objects not being used.

View File

@ -575,7 +575,7 @@ static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
long lret;
lret = reservation_object_wait_timeout_rcu(bo->resv, true, true,
nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
nonblock ? 0 : MAX_SCHEDULE_TIMEOUT);
if (!lret)
return -EBUSY;
else if (lret < 0)

View File

@ -1,7 +1,6 @@
config IMX_IPUV3_CORE
tristate "IPUv3 core support"
depends on SOC_IMX5 || SOC_IMX6Q || ARCH_MULTIPLATFORM
depends on RESET_CONTROLLER
select GENERIC_IRQ_CHIP
help
Choose this if you have a i.MX5/6 system and want to use the Image

View File

@ -1022,21 +1022,16 @@ static ssize_t vga_arb_write(struct file *file, const char __user *buf,
unsigned int io_state;
char *kbuf, *curr_pos;
char kbuf[64], *curr_pos;
size_t remaining = count;
int ret_val;
int i;
kbuf = kmalloc(count + 1, GFP_KERNEL);
if (!kbuf)
return -ENOMEM;
if (copy_from_user(kbuf, buf, count)) {
kfree(kbuf);
if (count >= sizeof(kbuf))
return -EINVAL;
if (copy_from_user(kbuf, buf, count))
return -EFAULT;
}
curr_pos = kbuf;
kbuf[count] = '\0'; /* Just to make sure... */
@ -1259,11 +1254,9 @@ static ssize_t vga_arb_write(struct file *file, const char __user *buf,
goto done;
}
/* If we got here, the message written is not part of the protocol! */
kfree(kbuf);
return -EPROTO;
done:
kfree(kbuf);
return ret_val;
}

View File

@ -533,6 +533,10 @@ hdmi_picture_aspect_get_name(enum hdmi_picture_aspect picture_aspect)
return "4:3";
case HDMI_PICTURE_ASPECT_16_9:
return "16:9";
case HDMI_PICTURE_ASPECT_64_27:
return "64:27";
case HDMI_PICTURE_ASPECT_256_135:
return "256:135";
case HDMI_PICTURE_ASPECT_RESERVED:
return "Reserved";
}

View File

@ -153,6 +153,7 @@ struct __drm_connnectors_state {
/**
* struct drm_atomic_state - the global state object for atomic updates
* @ref: count of all references to this state (will not be freed until zero)
* @dev: parent DRM device
* @allow_modeset: allow full modeset
* @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics
@ -164,6 +165,8 @@ struct __drm_connnectors_state {
* @acquire_ctx: acquire context for this atomic modeset state update
*/
struct drm_atomic_state {
struct kref ref;
struct drm_device *dev;
bool allow_modeset : 1;
bool legacy_cursor_update : 1;
@ -193,7 +196,33 @@ static inline void drm_crtc_commit_get(struct drm_crtc_commit *commit)
struct drm_atomic_state * __must_check
drm_atomic_state_alloc(struct drm_device *dev);
void drm_atomic_state_clear(struct drm_atomic_state *state);
void drm_atomic_state_free(struct drm_atomic_state *state);
/**
* drm_atomic_state_get - acquire a reference to the atomic state
* @state: The atomic state
*
* Returns a new reference to the @state
*/
static inline struct drm_atomic_state *
drm_atomic_state_get(struct drm_atomic_state *state)
{
kref_get(&state->ref);
return state;
}
void __drm_atomic_state_free(struct kref *ref);
/**
* drm_atomic_state_put - release a reference to the atomic state
* @state: The atomic state
*
* This releases a reference to @state which is freed after removing the
* final reference. No locking required and callable from any context.
*/
static inline void drm_atomic_state_put(struct drm_atomic_state *state)
{
kref_put(&state->ref, __drm_atomic_state_free);
}
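As an illustration of how the new reference counting is intended to be used (a sketch, not a definitive implementation): a nonblocking commit takes an extra reference before queuing its worker, and the worker drops it with drm_atomic_state_put() once the commit completes. example_nonblock_commit() is a hypothetical name; the helpers it calls are the ones drivers in this series already use.
static void example_commit_work(struct work_struct *work);     /* ends with drm_atomic_state_put() */

static int example_nonblock_commit(struct drm_atomic_state *state,
                                   struct work_struct *work)
{
        drm_atomic_helper_swap_state(state, true);

        drm_atomic_state_get(state);            /* reference now owned by the worker */
        queue_work(system_unbound_wq, work);

        return 0;
}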
int __must_check
drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state);
@ -365,8 +394,17 @@ int __must_check drm_atomic_nonblocking_commit(struct drm_atomic_state *state);
*
* To give drivers flexibility struct &drm_crtc_state has 3 booleans to track
* whether the state CRTC changed enough to need a full modeset cycle:
* connectors_changed, mode_changed and active_change. This helper simply
* connectors_changed, mode_changed and active_changed. This helper simply
* combines these three to compute the overall need for a modeset for @state.
*
* The atomic helper code sets these booleans, but drivers can and should
* change them appropriately to accurately represent whether a modeset is
* really needed. In general, drivers should avoid full modesets whenever
* possible.
*
* For example if the CRTC mode has changed, and the hardware is able to enact
* the requested mode change without going through a full modeset, the driver
* should clear mode_changed during its ->atomic_check.
*/
static inline bool
drm_atomic_crtc_needs_modeset(struct drm_crtc_state *state)
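A hedged sketch of the advice above: a driver ->atomic_check that clears mode_changed when its hardware can switch modes without a full modeset. example_hw_can_fast_switch() and example_full_modeset_check() are hypothetical driver helpers.
static int example_crtc_atomic_check(struct drm_crtc *crtc,
                                     struct drm_crtc_state *state)
{
        /* Hypothetical: hardware can reprogram timings seamlessly. */
        if (state->mode_changed && example_hw_can_fast_switch(crtc, state))
                state->mode_changed = false;

        if (drm_atomic_crtc_needs_modeset(state))
                return example_full_modeset_check(crtc, state);

        return 0;
}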

View File

@ -47,8 +47,16 @@ struct drm_atomic_state;
#define DRM_REFLECT_Y BIT(5)
#define DRM_REFLECT_MASK (DRM_REFLECT_X | DRM_REFLECT_Y)
static inline bool drm_rotation_90_or_270(unsigned int rotation)
{
return rotation & (DRM_ROTATE_90 | DRM_ROTATE_270);
}
struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
unsigned int supported_rotations);
int drm_plane_create_rotation_property(struct drm_plane *plane,
unsigned int rotation,
unsigned int supported_rotations);
unsigned int drm_rotation_simplify(unsigned int rotation,
unsigned int supported_rotations);

View File

@ -47,6 +47,7 @@
#include <drm/drm_plane.h>
#include <drm/drm_blend.h>
#include <drm/drm_color_mgmt.h>
#include <drm/drm_debugfs_crc.h>
struct drm_device;
struct drm_mode_set;
@ -116,6 +117,11 @@ struct drm_plane_helper_funcs;
* never return in a failure from the ->atomic_check callback. Userspace assumes
* that a DPMS On will always succeed. In other words: @enable controls resource
* assignment, @active controls the actual hardware state.
*
* The three booleans active_changed, connectors_changed and mode_changed are
* intended to indicate whether a full modeset is needed, rather than strictly
* describing what has changed in a commit.
* See also: drm_atomic_crtc_needs_modeset()
*/
struct drm_crtc_state {
struct drm_crtc *crtc;
@ -564,6 +570,30 @@ struct drm_crtc_funcs {
* before data structures are torndown.
*/
void (*early_unregister)(struct drm_crtc *crtc);
/**
* @set_crc_source:
*
* Changes the source of CRC checksums of frames at the request of
* userspace, typically for testing purposes. The sources available are
* specific to each driver, and a %NULL value indicates that CRC
* generation is to be switched off.
*
* When CRC generation is enabled, the driver should call
* drm_crtc_add_crc_entry() at each frame, providing any information
* that characterizes the frame contents in the crcN arguments, as
* provided from the configured source. Drivers must accept an "auto"
* source name that will select a default source for this CRTC.
*
* This callback is optional if the driver does not support any CRC
* generation functionality.
*
* RETURNS:
*
* 0 on success or a negative error code on failure.
*/
int (*set_crc_source)(struct drm_crtc *crtc, const char *source,
size_t *values_cnt);
};
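As a rough sketch of the contract described for @set_crc_source (all foo_* names are placeholders, not part of this series): the hook validates the requested source, arms the hardware CRC tap, reports how many CRC words each frame will carry, and the driver then feeds drm_crtc_add_crc_entry() from its frame-done handler.
static int foo_crtc_set_crc_source(struct drm_crtc *crtc, const char *source,
                                   size_t *values_cnt)
{
        /* This sketch only knows a single default tap, selected by "auto". */
        if (source && strcmp(source, "auto") != 0)
                return -EINVAL;

        foo_hw_enable_crc(crtc, source != NULL);        /* NULL switches CRCs off */
        *values_cnt = 1;                                /* one CRC word per frame */
        return 0;
}

/* Called from the driver's vblank/frame-done interrupt handler. */
static void foo_crtc_report_crc(struct drm_crtc *crtc, u32 frame, u32 crc)
{
        u32 crcs[DRM_MAX_CRC_NR] = { crc };

        drm_crtc_add_crc_entry(crtc, true, frame, crcs);
}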
/**
@ -680,6 +710,22 @@ struct drm_crtc {
* context.
*/
struct drm_modeset_acquire_ctx *acquire_ctx;
#ifdef CONFIG_DEBUG_FS
/**
* @debugfs_entry:
*
* Debugfs directory for this CRTC.
*/
struct dentry *debugfs_entry;
/**
* @crc:
*
* Configuration settings of CRC capture.
*/
struct drm_crtc_crc crc;
#endif
};
/**
@ -1354,7 +1400,7 @@ static inline unsigned int drm_crtc_index(const struct drm_crtc *crtc)
* Given a registered CRTC, return the mask bit of that CRTC for an
* encoder's possible_crtcs field.
*/
static inline uint32_t drm_crtc_mask(struct drm_crtc *crtc)
static inline uint32_t drm_crtc_mask(const struct drm_crtc *crtc)
{
return 1 << drm_crtc_index(crtc);
}

View File

@ -0,0 +1,73 @@
/*
* Copyright © 2016 Collabora Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef __DRM_DEBUGFS_CRC_H__
#define __DRM_DEBUGFS_CRC_H__
#define DRM_MAX_CRC_NR 10
/**
* struct drm_crtc_crc_entry - entry describing a frame's content
* @has_frame_counter: whether the source was able to provide a frame number
* @frame: number of the frame this CRC is about, if @has_frame_counter is true
* @crcs: array of values that characterize the frame
*/
struct drm_crtc_crc_entry {
bool has_frame_counter;
uint32_t frame;
uint32_t crcs[DRM_MAX_CRC_NR];
};
#define DRM_CRC_ENTRIES_NR 128
/**
* struct drm_crtc_crc - data supporting CRC capture on a given CRTC
* @lock: protects the fields in this struct
* @source: name of the currently configured source of CRCs
* @opened: whether userspace has opened the data file for reading
* @entries: array of entries, with size of %DRM_CRC_ENTRIES_NR
* @head: head of circular queue
* @tail: tail of circular queue
* @values_cnt: number of CRC values per entry, up to %DRM_MAX_CRC_NR
* @wq: workqueue used to synchronize reading and writing
*/
struct drm_crtc_crc {
spinlock_t lock;
const char *source;
bool opened;
struct drm_crtc_crc_entry *entries;
int head, tail;
size_t values_cnt;
wait_queue_head_t wq;
};
#if defined(CONFIG_DEBUG_FS)
int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame,
uint32_t frame, uint32_t *crcs);
#else
static inline int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame,
uint32_t frame, uint32_t *crcs)
{
return -EINVAL;
}
#endif /* defined(CONFIG_DEBUG_FS) */
#endif /* __DRM_DEBUGFS_CRC_H__ */

View File

@ -189,7 +189,7 @@ static inline unsigned int drm_encoder_index(struct drm_encoder *encoder)
}
/* FIXME: We have an include file mess still, drm_crtc.h needs untangling. */
static inline uint32_t drm_crtc_mask(struct drm_crtc *crtc);
static inline uint32_t drm_crtc_mask(const struct drm_crtc *crtc);
/**
* drm_encoder_crtc_ok - can a given crtc drive a given encoder?

View File

@ -25,8 +25,29 @@
#include <linux/types.h>
#include <uapi/drm/drm_fourcc.h>
/**
* struct drm_format_info - information about a DRM format
* @format: 4CC format identifier (DRM_FORMAT_*)
* @depth: Color depth (number of bits per pixel excluding padding bits),
* valid for a subset of RGB formats only. This is a legacy field; do not
* use it in new code, and set it to 0 for new formats.
* @num_planes: Number of color planes (1 to 3)
* @cpp: Number of bytes per pixel (per plane)
* @hsub: Horizontal chroma subsampling factor
* @vsub: Vertical chroma subsampling factor
*/
struct drm_format_info {
u32 format;
u8 depth;
u8 num_planes;
u8 cpp[3];
u8 hsub;
u8 vsub;
};
const struct drm_format_info *__drm_format_info(u32 format);
const struct drm_format_info *drm_format_info(u32 format);
uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth);
void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth, int *bpp);
int drm_format_num_planes(uint32_t format);
int drm_format_plane_cpp(uint32_t format, int plane);
int drm_format_horz_chroma_subsampling(uint32_t format);
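A small sketch, assuming tightly packed planes, of how the descriptor replaces open-coded bpp/depth math when sizing a buffer; example_fb_size() is a hypothetical helper.
static size_t example_fb_size(u32 format, unsigned int width, unsigned int height)
{
        const struct drm_format_info *info = drm_format_info(format);
        size_t size = 0;
        int i;

        if (!info)
                return 0;

        for (i = 0; i < info->num_planes; i++) {
                unsigned int w = i ? width / info->hsub : width;
                unsigned int h = i ? height / info->vsub : height;

                size += (size_t)w * info->cpp[i] * h;
        }

        return size;
}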

View File

@ -88,7 +88,6 @@ struct drm_plane_state {
struct drm_atomic_state *state;
};
/**
* struct drm_plane_funcs - driver plane control functions
*/
@ -386,6 +385,7 @@ enum drm_plane_type {
* @type: type of plane (overlay, primary, cursor)
* @state: current atomic state for this plane
* @zpos_property: zpos property for this plane
* @rotation_property: rotation property for this plane
* @helper_private: mid-layer private data
*/
struct drm_plane {
@ -432,6 +432,7 @@ struct drm_plane {
struct drm_plane_state *state;
struct drm_property *zpos_property;
struct drm_property *rotation_property;
};
#define obj_to_plane(x) container_of(x, struct drm_plane, base)

View File

@ -182,6 +182,16 @@ void fence_init(struct fence *fence, const struct fence_ops *ops,
void fence_release(struct kref *kref);
void fence_free(struct fence *fence);
/**
* fence_put - decreases refcount of the fence
* @fence: [in] fence to reduce refcount of
*/
static inline void fence_put(struct fence *fence)
{
if (fence)
kref_put(&fence->refcount, fence_release);
}
/**
* fence_get - increases refcount of the fence
* @fence: [in] fence to increase refcount of
@ -210,13 +220,49 @@ static inline struct fence *fence_get_rcu(struct fence *fence)
}
/**
* fence_put - decreases refcount of the fence
* @fence: [in] fence to reduce refcount of
* fence_get_rcu_safe - acquire a reference to an RCU tracked fence
* @fence: [in] pointer to fence to increase refcount of
*
* Returns the fence with its refcount incremented, or NULL if no reference
* could be obtained.
* This function handles acquiring a reference to a fence that may be
* reallocated within the RCU grace period (such as with SLAB_DESTROY_BY_RCU),
* so long as the caller is using RCU on the pointer to the fence.
*
* An alternative mechanism is to employ a seqlock to protect a bunch of
* fences, such as used by struct reservation_object. When using a seqlock,
* the seqlock must be taken before and checked after a reference to the
* fence is acquired (as shown here).
*
* The caller is required to hold the RCU read lock.
*/
static inline void fence_put(struct fence *fence)
static inline struct fence *fence_get_rcu_safe(struct fence * __rcu *fencep)
{
if (fence)
kref_put(&fence->refcount, fence_release);
do {
struct fence *fence;
fence = rcu_dereference(*fencep);
if (!fence || !fence_get_rcu(fence))
return NULL;
/* The atomic_inc_not_zero() inside fence_get_rcu()
* provides a full memory barrier upon success (such as now).
* This is paired with the write barrier from assigning
* to the __rcu protected fence pointer so that if that
* pointer still matches the current fence, we know we
* have successfully acquired a reference to it. If it no
* longer matches, we are holding a reference to some other
* reallocated pointer. This is possible if the allocator
* is using a freelist like SLAB_DESTROY_BY_RCU where the
* fence remains valid for the RCU grace period, but it
* may be reallocated. When using such allocators, we are
* responsible for ensuring the reference we get is to
* the right fence, as below.
*/
if (fence == rcu_access_pointer(*fencep))
return rcu_pointer_handoff(fence);
fence_put(fence);
} while (1);
}
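A minimal usage sketch: struct example_obj and example_get_exclusive() are made up for illustration; the point is that the RCU read lock is held across the lookup and the caller ends up with its own reference to drop via fence_put().
struct example_obj {
        struct fence __rcu *excl;       /* written under some object lock */
};

static struct fence *example_get_exclusive(struct example_obj *obj)
{
        struct fence *fence;

        rcu_read_lock();
        fence = fence_get_rcu_safe(&obj->excl);
        rcu_read_unlock();

        return fence;   /* caller drops it with fence_put() when done */
}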
int fence_signal(struct fence *fence);

View File

@ -78,6 +78,8 @@ enum hdmi_picture_aspect {
HDMI_PICTURE_ASPECT_NONE,
HDMI_PICTURE_ASPECT_4_3,
HDMI_PICTURE_ASPECT_16_9,
HDMI_PICTURE_ASPECT_64_27,
HDMI_PICTURE_ASPECT_256_135,
HDMI_PICTURE_ASPECT_RESERVED,
};

View File

@ -77,6 +77,25 @@ extern "C" {
#define DRM_MODE_FLAG_3D_TOP_AND_BOTTOM (7<<14)
#define DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF (8<<14)
/* Picture aspect ratio options */
#define DRM_MODE_PICTURE_ASPECT_NONE 0
#define DRM_MODE_PICTURE_ASPECT_4_3 1
#define DRM_MODE_PICTURE_ASPECT_16_9 2
#define DRM_MODE_PICTURE_ASPECT_64_27 3
#define DRM_MODE_PICTURE_ASPECT_256_135 4
/* Aspect ratio flag bitmask (4 bits 22:19) */
#define DRM_MODE_FLAG_PIC_AR_MASK (0x0F<<19)
#define DRM_MODE_FLAG_PIC_AR_NONE \
(DRM_MODE_PICTURE_ASPECT_NONE<<19)
#define DRM_MODE_FLAG_PIC_AR_4_3 \
(DRM_MODE_PICTURE_ASPECT_4_3<<19)
#define DRM_MODE_FLAG_PIC_AR_16_9 \
(DRM_MODE_PICTURE_ASPECT_16_9<<19)
#define DRM_MODE_FLAG_PIC_AR_64_27 \
(DRM_MODE_PICTURE_ASPECT_64_27<<19)
#define DRM_MODE_FLAG_PIC_AR_256_135 \
(DRM_MODE_PICTURE_ASPECT_256_135<<19)
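For clarity, a sketch of the packing these flags define (the aspect-ratio code sits in bits 22:19); the example_* helpers are illustrative only and not part of the UAPI.
static inline u32 example_pack_picture_aspect(u32 flags, u32 aspect)
{
        return (flags & ~DRM_MODE_FLAG_PIC_AR_MASK) | (aspect << 19);
}

static inline u32 example_unpack_picture_aspect(u32 flags)
{
        return (flags & DRM_MODE_FLAG_PIC_AR_MASK) >> 19;
}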
/* DPMS flags */
/* bit compatible with the xorg definitions. */
@ -92,11 +111,6 @@ extern "C" {
#define DRM_MODE_SCALE_CENTER 2 /* Centered, no scaling */
#define DRM_MODE_SCALE_ASPECT 3 /* Full screen, preserve aspect */
/* Picture aspect ratio options */
#define DRM_MODE_PICTURE_ASPECT_NONE 0
#define DRM_MODE_PICTURE_ASPECT_4_3 1
#define DRM_MODE_PICTURE_ASPECT_16_9 2
/* Dithering mode options */
#define DRM_MODE_DITHERING_OFF 0
#define DRM_MODE_DITHERING_ON 1