drm/color: un-inline drm_color_lut_extract()
The function is not that big, but it's also not used for anything performance critical. Make it a normal function.

As a side effect, this apparently makes sparse smarter about what it's doing, and gets rid of the warnings:

  ./include/drm/drm_color_mgmt.h:53:28: warning: shift too big (4294967295) for type unsigned long
  ./include/drm/drm_color_mgmt.h:53:28: warning: cast truncates bits from constant value (8000000000000000 becomes 0)

v2: rebased

Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Reviewed-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1485531291-24821-1-git-send-email-jani.nikula@intel.com
This commit is contained in:
parent
cbef909939
commit
8f2e045ec8
|
@ -87,6 +87,30 @@
|
|||
* "GAMMA_LUT" property above.
|
||||
*/
|
||||
|
||||
/**
 * drm_color_lut_extract - clamp and round LUT entries
 * @user_input: input value
 * @bit_precision: number of bits the hw LUT supports
 *
 * Extract a degamma/gamma LUT value provided by user (in the form of
 * &drm_color_lut entries) and round it to the precision supported by the
 * hardware.
 */
uint32_t drm_color_lut_extract(uint32_t user_input, uint32_t bit_precision)
{
	uint32_t hw_max = 0xffff >> (16 - bit_precision);
	uint32_t rounded = user_input;

	/* Full 16-bit precision passes straight through to the clamp. */
	if (bit_precision < 16) {
		/* Add half of the dropped range, then truncate down. */
		rounded += 1UL << (16 - bit_precision - 1);
		rounded >>= 16 - bit_precision;
	}

	return clamp_val(rounded, 0, hw_max);
}
EXPORT_SYMBOL(drm_color_lut_extract);
|
||||
|
||||
/**
|
||||
* drm_crtc_enable_color_mgmt - enable color management properties
|
||||
* @crtc: DRM CRTC
|
||||
|
|
|
@ -25,6 +25,8 @@
|
|||
|
||||
#include <linux/ctype.h>
|
||||
|
||||
uint32_t drm_color_lut_extract(uint32_t user_input, uint32_t bit_precision);
|
||||
|
||||
void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc,
|
||||
uint degamma_lut_size,
|
||||
bool has_ctm,
|
||||
|
@ -33,29 +35,4 @@ void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc,
|
|||
int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
|
||||
int gamma_size);
|
||||
|
||||
/**
 * drm_color_lut_extract - clamp and round LUT entries
 * @user_input: input value
 * @bit_precision: number of bits the hw LUT supports
 *
 * Extract a degamma/gamma LUT value provided by user (in the form of
 * &drm_color_lut entries) and round it to the precision supported by the
 * hardware.
 */
static inline uint32_t drm_color_lut_extract(uint32_t user_input,
					     uint32_t bit_precision)
{
	uint32_t hw_max = 0xffff >> (16 - bit_precision);
	uint32_t result = user_input;

	/* Only round when the hw LUT has fewer than the full 16 bits. */
	if (bit_precision < 16) {
		/* Round half up before discarding the low bits. */
		result += 1UL << (16 - bit_precision - 1);
		result >>= 16 - bit_precision;
	}

	return clamp_val(result, 0, hw_max);
}
|
||||
|
||||
|
||||
#endif
|
||||
|
|
Loading…
Reference in New Issue