drm/vc4: Use the atomic state's commit workqueue.

Now that we're using the atomic helpers for fence waits, we can use
the same codepath as drm_atomic_helper_commit() does for async,
getting rid of our custom vc4_commit struct.

Signed-off-by: Eric Anholt <eric@anholt.net>
Link: http://patchwork.freedesktop.org/patch/msgid/20170621185002.28563-3-eric@anholt.net
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Reviewed-by: Boris Brezillon <boris.brezillon@free-electrons.com>
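
For readers who haven't followed the helper rework: struct drm_atomic_state already embeds a work_struct named commit_work, which drm_atomic_helper_commit() queues on system_unbound_wq for its nonblocking path, and this patch switches vc4 over to that same shape. Below is a rough, self-contained sketch of the pattern; the foo_* names are placeholders rather than code from this patch, and vc4's async_modeset semaphore and error-path details are left out.

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <linux/workqueue.h>

/*
 * Stand-in for the driver's commit tail (vc4_atomic_complete_commit() in
 * this patch): the real thing swaps in the new state, programs the
 * hardware, cleans up the old planes and drops the state reference with
 * drm_atomic_state_put().
 */
static void foo_atomic_complete_commit(struct drm_atomic_state *state)
{
}

/* Runs in process context from system_unbound_wq for nonblocking commits. */
static void foo_commit_work(struct work_struct *work)
{
	struct drm_atomic_state *state =
		container_of(work, struct drm_atomic_state, commit_work);

	foo_atomic_complete_commit(state);
}

static int foo_atomic_commit(struct drm_device *dev,
			     struct drm_atomic_state *state,
			     bool nonblock)
{
	int ret;

	/* Per-CRTC completion tracking used by the nonblocking machinery. */
	ret = drm_atomic_helper_setup_commit(state, nonblock);
	if (ret)
		return ret;

	/* The atomic state itself carries the work item. */
	INIT_WORK(&state->commit_work, foo_commit_work);

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret)
		return ret;

	/* Reference for the commit tail; dropped when the tail finishes. */
	drm_atomic_state_get(state);

	if (nonblock)
		queue_work(system_unbound_wq, &state->commit_work);
	else
		foo_atomic_complete_commit(state);

	return 0;
}

The blocking case runs the tail inline, so the caller only returns once the commit is done; the nonblocking case returns immediately and the queued work finishes the commit and drops the extra state reference.
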
Author: Eric Anholt <eric@anholt.net>
Date:   2017-06-21 11:50:01 -07:00
Parent: 53ad06949d
Commit: cf1b372ec1
1 changed file with 13 additions and 58 deletions

drivers/gpu/drm/vc4/vc4_kms.c

@@ -29,16 +29,9 @@ static void vc4_output_poll_changed(struct drm_device *dev)
 	drm_fbdev_cma_hotplug_event(vc4->fbdev);
 }
-struct vc4_commit {
-	struct drm_device *dev;
-	struct drm_atomic_state *state;
-	struct vc4_seqno_cb cb;
-};
 static void
-vc4_atomic_complete_commit(struct vc4_commit *c)
+vc4_atomic_complete_commit(struct drm_atomic_state *state)
 {
-	struct drm_atomic_state *state = c->state;
 	struct drm_device *dev = state->dev;
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
@@ -72,28 +65,14 @@ vc4_atomic_complete_commit(struct vc4_commit *c)
 	drm_atomic_state_put(state);
 	up(&vc4->async_modeset);
-	kfree(c);
 }
-static void
-vc4_atomic_complete_commit_seqno_cb(struct vc4_seqno_cb *cb)
+static void commit_work(struct work_struct *work)
 {
-	struct vc4_commit *c = container_of(cb, struct vc4_commit, cb);
-	vc4_atomic_complete_commit(c);
-}
-static struct vc4_commit *commit_init(struct drm_atomic_state *state)
-{
-	struct vc4_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);
-	if (!c)
-		return NULL;
-	c->dev = state->dev;
-	c->state = state;
-	return c;
+	struct drm_atomic_state *state = container_of(work,
+						       struct drm_atomic_state,
+						       commit_work);
+	vc4_atomic_complete_commit(state);
 }
 /**
@@ -115,29 +94,19 @@ static int vc4_atomic_commit(struct drm_device *dev,
 {
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	int ret;
-	int i;
-	uint64_t wait_seqno = 0;
-	struct vc4_commit *c;
-	struct drm_plane *plane;
-	struct drm_plane_state *new_state;
-	c = commit_init(state);
-	if (!c)
-		return -ENOMEM;
+	ret = drm_atomic_helper_setup_commit(state, nonblock);
+	if (ret)
+		return ret;
+	INIT_WORK(&state->commit_work, commit_work);
 	ret = down_interruptible(&vc4->async_modeset);
-	if (ret) {
-		kfree(c);
+	if (ret)
 		return ret;
-	}
 	ret = drm_atomic_helper_prepare_planes(dev, state);
 	if (ret) {
-		kfree(c);
 		up(&vc4->async_modeset);
 		return ret;
 	}
@@ -146,22 +115,11 @@ static int vc4_atomic_commit(struct drm_device *dev,
 		ret = drm_atomic_helper_wait_for_fences(dev, state, true);
 		if (ret) {
 			drm_atomic_helper_cleanup_planes(dev, state);
-			kfree(c);
 			up(&vc4->async_modeset);
 			return ret;
 		}
 	}
-	for_each_plane_in_state(state, plane, new_state, i) {
-		if ((plane->state->fb != new_state->fb) && new_state->fb) {
-			struct drm_gem_cma_object *cma_bo =
-				drm_fb_cma_get_gem_obj(new_state->fb, 0);
-			struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);
-			wait_seqno = max(bo->seqno, wait_seqno);
-		}
-	}
 	/*
 	 * This is the point of no return - everything below never fails except
 	 * when the hw goes bonghits. Which means we can commit the new state on
@@ -187,13 +145,10 @@ static int vc4_atomic_commit(struct drm_device *dev,
 	 */
 	drm_atomic_state_get(state);
-	if (nonblock) {
-		vc4_queue_seqno_cb(dev, &c->cb, wait_seqno,
-				   vc4_atomic_complete_commit_seqno_cb);
-	} else {
-		vc4_wait_for_seqno(dev, wait_seqno, ~0ull, false);
-		vc4_atomic_complete_commit(c);
-	}
+	if (nonblock)
+		queue_work(system_unbound_wq, &state->commit_work);
+	else
+		vc4_atomic_complete_commit(state);
 	return 0;
 }