drm: Create drm_send_event helpers

Use them in the core vblank code and exynos/vmwgfx drivers.

Note that the difference between wake_up_all and wake_up_interruptible in
vmwgfx doesn't matter, since the only waiter is the core code in
drm_fops.c, and that waits interruptibly.
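
For illustration only (not part of this patch), the usage pattern these
helpers enable looks roughly like the sketch below; the wrapper struct,
field and function names are made up, only drm_event_reserve_init(),
drm_send_event() and drm_send_event_locked() come from the DRM core:

	/* Hypothetical driver event, embedding the core structs the same
	 * way the exynos and vmwgfx conversions below do.
	 */
	struct my_pending_event {
		struct drm_pending_event base;
		struct drm_my_event event;	/* starts with struct drm_event */
	};

	static void my_driver_finish(struct drm_device *dev,
				     struct my_pending_event *e)
	{
		/* e was set up earlier in the ioctl with
		 * drm_event_reserve_init(dev, file_priv, &e->base,
		 *                        &e->event.base);
		 */
		e->event.result = 0;	/* fill in driver-specific payload */

		/* Queues the event on the DRM file and wakes poll() waiters;
		 * takes dev->event_lock itself. Use drm_send_event_locked()
		 * instead from paths that already hold that lock.
		 */
		drm_send_event(dev, &e->base);
	}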

v2: Adjust existing kerneldoc too.

Reviewed-by: Alex Deucher <alexander.deucher@amd.com> (v1)
Acked-by: Daniel Stone <daniels@collabora.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: Inki Dae <inki.dae@samsung.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1452548477-15905-6-git-send-email-daniel.vetter@ffwll.ch
Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
[danvet: Squash in compile fixup, spotted by 0-day.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Author: Daniel Vetter
Date:   2016-01-11 22:40:59 +01:00
Parent: 6d3729ac13
Commit: fb740cf249

6 files changed, 48 insertions(+), 18 deletions(-)

drivers/gpu/drm/drm_fops.c

@@ -687,7 +687,9 @@ EXPORT_SYMBOL(drm_poll);
  * This function prepares the passed in event for eventual delivery. If the event
  * doesn't get delivered (because the IOCTL fails later on, before queuing up
  * anything) then the even must be cancelled and freed using
- * drm_event_cancel_free().
+ * drm_event_cancel_free(). Successfully initialized events should be sent out
+ * using drm_send_event() or drm_send_event_locked() to signal completion of the
+ * asynchronous event to userspace.
  *
  * If callers embedded @p into a larger structure it must be allocated with
  * kmalloc and @p must be the first member element.
@@ -743,3 +745,41 @@ void drm_event_cancel_free(struct drm_device *dev,
 	p->destroy(p);
 }
 EXPORT_SYMBOL(drm_event_cancel_free);
+
+/**
+ * drm_send_event_locked - send DRM event to file descriptor
+ * @dev: DRM device
+ * @e: DRM event to deliver
+ *
+ * This function sends the event @e, initialized with drm_event_reserve_init(),
+ * to its associated userspace DRM file. Callers must already hold
+ * dev->event_lock, see drm_send_event() for the unlocked version.
+ */
+void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
+{
+	assert_spin_locked(&dev->event_lock);
+
+	list_add_tail(&e->link,
+		      &e->file_priv->event_list);
+	wake_up_interruptible(&e->file_priv->event_wait);
+}
+EXPORT_SYMBOL(drm_send_event_locked);
+
+/**
+ * drm_send_event - send DRM event to file descriptor
+ * @dev: DRM device
+ * @e: DRM event to deliver
+ *
+ * This function sends the event @e, initialized with drm_event_reserve_init(),
+ * to its associated userspace DRM file. This function acquires dev->event_lock,
+ * see drm_send_event_locked() for callers which already hold this lock.
+ */
+void drm_send_event(struct drm_device *dev, struct drm_pending_event *e)
+{
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev->event_lock, irqflags);
+	drm_send_event_locked(dev, e);
+	spin_unlock_irqrestore(&dev->event_lock, irqflags);
+}
+EXPORT_SYMBOL(drm_send_event);

drivers/gpu/drm/drm_irq.c

@@ -983,15 +983,12 @@ static void send_vblank_event(struct drm_device *dev,
 		struct drm_pending_vblank_event *e,
 		unsigned long seq, struct timeval *now)
 {
-	assert_spin_locked(&dev->event_lock);
-
 	e->event.sequence = seq;
 	e->event.tv_sec = now->tv_sec;
 	e->event.tv_usec = now->tv_usec;
 
-	list_add_tail(&e->base.link,
-		      &e->base.file_priv->event_list);
-	wake_up_interruptible(&e->base.file_priv->event_wait);
+	drm_send_event_locked(dev, &e->base);
+
 	trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
 					 e->event.sequence);
 }

drivers/gpu/drm/exynos/exynos_drm_g2d.c

@@ -880,7 +880,6 @@ static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no)
 	struct g2d_runqueue_node *runqueue_node = g2d->runqueue_node;
 	struct drm_exynos_pending_g2d_event *e;
 	struct timeval now;
-	unsigned long flags;
 
 	if (list_empty(&runqueue_node->event_list))
 		return;
@@ -893,10 +892,7 @@ static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no)
 	e->event.tv_usec = now.tv_usec;
 	e->event.cmdlist_no = cmdlist_no;
 
-	spin_lock_irqsave(&drm_dev->event_lock, flags);
-	list_move_tail(&e->base.link, &e->base.file_priv->event_list);
-	wake_up_interruptible(&e->base.file_priv->event_wait);
-	spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+	drm_send_event(drm_dev, &e->base);
 }
 
 static irqreturn_t g2d_irq_handler(int irq, void *dev_id)

drivers/gpu/drm/exynos/exynos_drm_ipp.c

@@ -1407,7 +1407,6 @@ static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
 	struct drm_exynos_ipp_send_event *e;
 	struct list_head *head;
 	struct timeval now;
-	unsigned long flags;
 	u32 tbuf_id[EXYNOS_DRM_OPS_MAX] = {0, };
 	int ret, i;
 
@@ -1520,10 +1519,7 @@ static int ipp_send_event(struct exynos_drm_ippdrv *ippdrv,
 	for_each_ipp_ops(i)
 		e->event.buf_id[i] = tbuf_id[i];
 
-	spin_lock_irqsave(&drm_dev->event_lock, flags);
-	list_move_tail(&e->base.link, &e->base.file_priv->event_list);
-	wake_up_interruptible(&e->base.file_priv->event_wait);
-	spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+	drm_send_event(drm_dev, &e->base);
 	mutex_unlock(&c_node->event_lock);
 
 	DRM_DEBUG_KMS("done cmd[%d]prop_id[%d]buf_id[%d]\n",

drivers/gpu/drm/vmwgfx/vmwgfx_fence.c

@@ -880,9 +880,8 @@ static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
 	}
 
 	list_del_init(&eaction->fpriv_head);
-	list_add_tail(&eaction->event->link, &file_priv->event_list);
+	drm_send_event_locked(dev, eaction->event);
 	eaction->event = NULL;
-	wake_up_all(&file_priv->event_wait);
 	spin_unlock_irqrestore(&dev->event_lock, irq_flags);
 }

include/drm/drmP.h

@@ -932,6 +932,8 @@ int drm_event_reserve_init(struct drm_device *dev,
 			   struct drm_event *e);
 void drm_event_cancel_free(struct drm_device *dev,
 			   struct drm_pending_event *p);
+void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e);
+void drm_send_event(struct drm_device *dev, struct drm_pending_event *e);
 
 /* Misc. IOCTL support (drm_ioctl.c) */
 int drm_noop(struct drm_device *dev, void *data,