drm/msm/disp/dpu: Remove unused code from dpu_crtc.c

Remove a chunk of unused code from dpu_crtc.c, namely
dpu_crtc_res_add, dpu_crtc_res_get, dpu_crtc_res_put
and the associated static helper functions.

Also zap dpu_crtc_event_queue(), its helper
functions and associated struct members.

Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
Signed-off-by: Rob Clark <robdclark@gmail.com>
Jordan Crouse authored 2018-07-26 14:30:11 -06:00; committed by Rob Clark
parent 2c7b48e726
commit c17aeda0b0
2 changed files with 2 additions and 442 deletions

drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c

@@ -280,289 +280,6 @@ static void _dpu_crtc_rp_reset(struct dpu_crtc_respool *rp,
	mutex_unlock(rp_lock);
}

/**
 * _dpu_crtc_rp_add_no_lock - add given resource to resource pool without lock
 * @rp: Pointer to original resource pool
 * @type: Resource type
 * @tag: Search tag for given resource
 * @val: Resource handle
 * @ops: Resource callback operations
 * return: 0 if success; error code otherwise
 */
static int _dpu_crtc_rp_add_no_lock(struct dpu_crtc_respool *rp, u32 type,
		u64 tag, void *val, struct dpu_crtc_res_ops *ops)
{
	struct dpu_crtc_res *res;
	struct drm_crtc *crtc;
	if (!rp || !ops) {
		DPU_ERROR("invalid resource pool/ops\n");
		return -EINVAL;
	}
	crtc = _dpu_crtc_rp_to_crtc(rp);
	if (!crtc) {
		DPU_ERROR("invalid crtc\n");
		return -EINVAL;
	}
	list_for_each_entry(res, &rp->res_list, list) {
		if (res->type != type || res->tag != tag)
			continue;
		DPU_ERROR("crtc%d.%u already exist res:0x%x/0x%llx/%pK/%d\n",
				crtc->base.id, rp->sequence_id,
				res->type, res->tag, res->val,
				atomic_read(&res->refcount));
		return -EEXIST;
	}
	res = kzalloc(sizeof(struct dpu_crtc_res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;
	INIT_LIST_HEAD(&res->list);
	atomic_set(&res->refcount, 1);
	res->type = type;
	res->tag = tag;
	res->val = val;
	res->ops = *ops;
	list_add_tail(&res->list, &rp->res_list);
	DPU_DEBUG("crtc%d.%u added res:0x%x/0x%llx\n",
			crtc->base.id, rp->sequence_id, type, tag);
	return 0;
}

/**
 * _dpu_crtc_rp_add - add given resource to resource pool
 * @rp: Pointer to original resource pool
 * @type: Resource type
 * @tag: Search tag for given resource
 * @val: Resource handle
 * @ops: Resource callback operations
 * return: 0 if success; error code otherwise
 */
static int _dpu_crtc_rp_add(struct dpu_crtc_respool *rp, u32 type, u64 tag,
		void *val, struct dpu_crtc_res_ops *ops)
{
	int rc;
	if (!rp) {
		DPU_ERROR("invalid resource pool\n");
		return -EINVAL;
	}
	mutex_lock(rp->rp_lock);
	rc = _dpu_crtc_rp_add_no_lock(rp, type, tag, val, ops);
	mutex_unlock(rp->rp_lock);
	return rc;
}

/**
 * _dpu_crtc_rp_get - lookup the resource from given resource pool and obtain
 *	if available; otherwise, obtain resource from global pool
 * @rp: Pointer to original resource pool
 * @type: Resource type
 * @tag: Search tag for given resource
 * return: Resource handle if success; pointer error or null otherwise
 */
static void *_dpu_crtc_rp_get(struct dpu_crtc_respool *rp, u32 type, u64 tag)
{
	struct dpu_crtc_respool *old_rp;
	struct dpu_crtc_res *res;
	void *val = NULL;
	int rc;
	struct drm_crtc *crtc;
	if (!rp) {
		DPU_ERROR("invalid resource pool\n");
		return NULL;
	}
	crtc = _dpu_crtc_rp_to_crtc(rp);
	if (!crtc) {
		DPU_ERROR("invalid crtc\n");
		return NULL;
	}
	mutex_lock(rp->rp_lock);
	list_for_each_entry(res, &rp->res_list, list) {
		if (res->type != type || res->tag != tag)
			continue;
		DPU_DEBUG("crtc%d.%u found res:0x%x/0x%llx/%pK/%d\n",
				crtc->base.id, rp->sequence_id,
				res->type, res->tag, res->val,
				atomic_read(&res->refcount));
		atomic_inc(&res->refcount);
		res->flags &= ~DPU_CRTC_RES_FLAG_FREE;
		mutex_unlock(rp->rp_lock);
		return res->val;
	}
	list_for_each_entry(res, &rp->res_list, list) {
		if (res->type != type || !(res->flags & DPU_CRTC_RES_FLAG_FREE))
			continue;
		DPU_DEBUG("crtc%d.%u retag res:0x%x/0x%llx/%pK/%d\n",
				crtc->base.id, rp->sequence_id,
				res->type, res->tag, res->val,
				atomic_read(&res->refcount));
		atomic_inc(&res->refcount);
		res->tag = tag;
		res->flags &= ~DPU_CRTC_RES_FLAG_FREE;
		mutex_unlock(rp->rp_lock);
		return res->val;
	}
	/* not in this rp, try to grab from global pool */
	if (rp->ops.get)
		val = rp->ops.get(NULL, type, -1);
	if (!IS_ERR_OR_NULL(val))
		goto add_res;
	/*
	 * Search older resource pools for hw blk with matching type,
	 * necessary when resource is being used by this object,
	 * but in previous states not yet cleaned up.
	 *
	 * This enables searching of all resources currently owned
	 * by this crtc even though the resource might not be used
	 * in the current atomic state. This allows those resources
	 * to be re-acquired by the new atomic state immediately
	 * without waiting for the resources to be fully released.
	 */
	else if (IS_ERR_OR_NULL(val) && (type < DPU_HW_BLK_MAX)) {
		list_for_each_entry(old_rp, rp->rp_head, rp_list) {
			if (old_rp == rp)
				continue;
			list_for_each_entry(res, &old_rp->res_list, list) {
				if (res->type != type)
					continue;
				DRM_DEBUG_KMS("crtc%d.%u found res:0x%x//%pK/ "
						"in crtc%d.%d\n",
						crtc->base.id, rp->sequence_id,
						res->type, res->val,
						crtc->base.id,
						old_rp->sequence_id);
				if (res->ops.get)
					res->ops.get(res->val, 0, -1);
				val = res->val;
				break;
			}
			if (!IS_ERR_OR_NULL(val))
				break;
		}
	}
	if (IS_ERR_OR_NULL(val)) {
		DPU_DEBUG("crtc%d.%u failed to get res:0x%x//\n",
				crtc->base.id, rp->sequence_id, type);
		mutex_unlock(rp->rp_lock);
		return NULL;
	}
add_res:
	rc = _dpu_crtc_rp_add_no_lock(rp, type, tag, val, &rp->ops);
	if (rc) {
		DPU_ERROR("crtc%d.%u failed to add res:0x%x/0x%llx\n",
				crtc->base.id, rp->sequence_id, type, tag);
		if (rp->ops.put)
			rp->ops.put(val);
		val = NULL;
	}
	mutex_unlock(rp->rp_lock);
	return val;
}

/**
 * _dpu_crtc_rp_put - return given resource to resource pool
 * @rp: Pointer to original resource pool
 * @type: Resource type
 * @tag: Search tag for given resource
 * return: None
 */
static void _dpu_crtc_rp_put(struct dpu_crtc_respool *rp, u32 type, u64 tag)
{
	struct dpu_crtc_res *res, *next;
	struct drm_crtc *crtc;
	if (!rp) {
		DPU_ERROR("invalid resource pool\n");
		return;
	}
	crtc = _dpu_crtc_rp_to_crtc(rp);
	if (!crtc) {
		DPU_ERROR("invalid crtc\n");
		return;
	}
	mutex_lock(rp->rp_lock);
	list_for_each_entry_safe(res, next, &rp->res_list, list) {
		if (res->type != type || res->tag != tag)
			continue;
		DPU_DEBUG("crtc%d.%u found res:0x%x/0x%llx/%pK/%d\n",
				crtc->base.id, rp->sequence_id,
				res->type, res->tag, res->val,
				atomic_read(&res->refcount));
		if (res->flags & DPU_CRTC_RES_FLAG_FREE)
			DPU_ERROR(
				"crtc%d.%u already free res:0x%x/0x%llx/%pK/%d\n",
					crtc->base.id, rp->sequence_id,
					res->type, res->tag, res->val,
					atomic_read(&res->refcount));
		else if (atomic_dec_return(&res->refcount) == 0)
			res->flags |= DPU_CRTC_RES_FLAG_FREE;
		mutex_unlock(rp->rp_lock);
		return;
	}
	DPU_ERROR("crtc%d.%u not found res:0x%x/0x%llx\n",
			crtc->base.id, rp->sequence_id, type, tag);
	mutex_unlock(rp->rp_lock);
}

int dpu_crtc_res_add(struct drm_crtc_state *state, u32 type, u64 tag,
		void *val, struct dpu_crtc_res_ops *ops)
{
	struct dpu_crtc_respool *rp;
	if (!state) {
		DPU_ERROR("invalid parameters\n");
		return -EINVAL;
	}
	rp = &to_dpu_crtc_state(state)->rp;
	return _dpu_crtc_rp_add(rp, type, tag, val, ops);
}

void *dpu_crtc_res_get(struct drm_crtc_state *state, u32 type, u64 tag)
{
	struct dpu_crtc_respool *rp;
	void *val;
	if (!state) {
		DPU_ERROR("invalid parameters\n");
		return NULL;
	}
	rp = &to_dpu_crtc_state(state)->rp;
	val = _dpu_crtc_rp_get(rp, type, tag);
	if (IS_ERR(val)) {
		DPU_ERROR("failed to get res type:0x%x:0x%llx\n",
				type, tag);
		return NULL;
	}
	return val;
}

void dpu_crtc_res_put(struct drm_crtc_state *state, u32 type, u64 tag)
{
	struct dpu_crtc_respool *rp;
	if (!state) {
		DPU_ERROR("invalid parameters\n");
		return;
	}
	rp = &to_dpu_crtc_state(state)->rp;
	_dpu_crtc_rp_put(rp, type, tag);
}

static void dpu_crtc_destroy(struct drm_crtc *crtc)
{
	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
@@ -2364,97 +2081,6 @@ static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
	.atomic_flush = dpu_crtc_atomic_flush,
};

static void _dpu_crtc_event_cb(struct kthread_work *work)
{
	struct dpu_crtc_event *event;
	struct dpu_crtc *dpu_crtc;
	unsigned long irq_flags;
	if (!work) {
		DPU_ERROR("invalid work item\n");
		return;
	}
	event = container_of(work, struct dpu_crtc_event, kt_work);
	/* set dpu_crtc to NULL for static work structures */
	dpu_crtc = event->dpu_crtc;
	if (!dpu_crtc)
		return;
	if (event->cb_func)
		event->cb_func(&dpu_crtc->base, event->usr);
	spin_lock_irqsave(&dpu_crtc->event_lock, irq_flags);
	list_add_tail(&event->list, &dpu_crtc->event_free_list);
	spin_unlock_irqrestore(&dpu_crtc->event_lock, irq_flags);
}

int dpu_crtc_event_queue(struct drm_crtc *crtc,
		void (*func)(struct drm_crtc *crtc, void *usr), void *usr)
{
	unsigned long irq_flags;
	struct dpu_crtc *dpu_crtc;
	struct msm_drm_private *priv;
	struct dpu_crtc_event *event = NULL;
	u32 crtc_id;
	if (!crtc || !crtc->dev || !crtc->dev->dev_private || !func) {
		DPU_ERROR("invalid parameters\n");
		return -EINVAL;
	}
	dpu_crtc = to_dpu_crtc(crtc);
	priv = crtc->dev->dev_private;
	crtc_id = drm_crtc_index(crtc);
	/*
	 * Obtain an event struct from the private cache. This event
	 * queue may be called from ISR contexts, so use a private
	 * cache to avoid calling any memory allocation functions.
	 */
	spin_lock_irqsave(&dpu_crtc->event_lock, irq_flags);
	if (!list_empty(&dpu_crtc->event_free_list)) {
		event = list_first_entry(&dpu_crtc->event_free_list,
				struct dpu_crtc_event, list);
		list_del_init(&event->list);
	}
	spin_unlock_irqrestore(&dpu_crtc->event_lock, irq_flags);
	if (!event)
		return -ENOMEM;
	/* populate event node */
	event->dpu_crtc = dpu_crtc;
	event->cb_func = func;
	event->usr = usr;
	/* queue new event request */
	kthread_init_work(&event->kt_work, _dpu_crtc_event_cb);
	kthread_queue_work(&priv->event_thread[crtc_id].worker,
			&event->kt_work);
	return 0;
}

static int _dpu_crtc_init_events(struct dpu_crtc *dpu_crtc)
{
	int i, rc = 0;
	if (!dpu_crtc) {
		DPU_ERROR("invalid crtc\n");
		return -EINVAL;
	}
	spin_lock_init(&dpu_crtc->event_lock);
	INIT_LIST_HEAD(&dpu_crtc->event_free_list);
	for (i = 0; i < DPU_CRTC_MAX_EVENT_COUNT; ++i)
		list_add_tail(&dpu_crtc->event_cache[i].list,
				&dpu_crtc->event_free_list);
	return rc;
}

/* initialize crtc */
struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane)
{
@@ -2462,7 +2088,7 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane)
	struct dpu_crtc *dpu_crtc = NULL;
	struct msm_drm_private *priv = NULL;
	struct dpu_kms *kms = NULL;
	int i, rc;
	int i;

	priv = dev->dev_private;
	kms = to_dpu_kms(priv->kms);

@@ -2503,12 +2129,7 @@ struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane)
	snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);

	/* initialize event handling */
	rc = _dpu_crtc_init_events(dpu_crtc);
	if (rc) {
		drm_crtc_cleanup(crtc);
		kfree(dpu_crtc);
		return ERR_PTR(rc);
	}
	spin_lock_init(&dpu_crtc->event_lock);

	dpu_crtc->phandle = &kms->phandle;

drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h

@@ -112,23 +112,6 @@ struct dpu_crtc_frame_event {
	u32 event;
};

/**
 * struct dpu_crtc_event - event callback tracking structure
 * @list: Linked list tracking node
 * @kt_work: Kthread worker structure
 * @dpu_crtc: Pointer to associated dpu_crtc structure
 * @cb_func: Pointer to callback function
 * @usr: Pointer to user data to be provided to the callback
 */
struct dpu_crtc_event {
	struct list_head list;
	struct kthread_work kt_work;
	void *dpu_crtc;
	void (*cb_func)(struct drm_crtc *crtc, void *usr);
	void *usr;
};

/*
 * Maximum number of free event structures to cache
 */

@@ -172,8 +155,6 @@ struct dpu_crtc_event {
 * @frame_done_comp : for frame_event_done synchronization
 * @event_thread : Pointer to event handler thread
 * @event_worker : Event worker queue
 * @event_cache : Local cache of event worker structures
 * @event_free_list : List of available event structures
 * @event_lock : Spinlock around event handling code
 * @misr_enable : boolean entry indicates misr enable/disable status.
 * @misr_frame_count : misr frame count provided by client

@@ -224,8 +205,6 @@ struct dpu_crtc {
	struct completion frame_done_comp;

	/* for handling internal event thread */
	struct dpu_crtc_event event_cache[DPU_CRTC_MAX_EVENT_COUNT];
	struct list_head event_free_list;
	spinlock_t event_lock;
	bool misr_enable;
	u32 misr_frame_count;

@@ -441,44 +420,4 @@ static inline bool dpu_crtc_is_enabled(struct drm_crtc *crtc)
	return crtc ? crtc->enabled : false;
}

/**
 * dpu_crtc_event_queue - request event callback
 * @crtc: Pointer to drm crtc structure
 * @func: Pointer to callback function
 * @usr: Pointer to user data to be passed to callback
 * Returns: Zero on success
 */
int dpu_crtc_event_queue(struct drm_crtc *crtc,
		void (*func)(struct drm_crtc *crtc, void *usr), void *usr);

/**
 * dpu_crtc_res_add - add given resource to resource pool in crtc state
 * @state: Pointer to drm crtc state
 * @type: Resource type
 * @tag: Search tag for given resource
 * @val: Resource handle
 * @ops: Resource callback operations
 * return: 0 if success; error code otherwise
 */
int dpu_crtc_res_add(struct drm_crtc_state *state, u32 type, u64 tag,
		void *val, struct dpu_crtc_res_ops *ops);

/**
 * dpu_crtc_res_get - get given resource from resource pool in crtc state
 * @state: Pointer to drm crtc state
 * @type: Resource type
 * @tag: Search tag for given resource
 * return: Resource handle if success; pointer error or null otherwise
 */
void *dpu_crtc_res_get(struct drm_crtc_state *state, u32 type, u64 tag);

/**
 * dpu_crtc_res_put - return given resource to resource pool in crtc state
 * @state: Pointer to drm crtc state
 * @type: Resource type
 * @tag: Search tag for given resource
 * return: None
 */
void dpu_crtc_res_put(struct drm_crtc_state *state, u32 type, u64 tag);

#endif /* _DPU_CRTC_H_ */