drm/i915: move more generic utils to i915_utils.h
Reduce clutter from i915_drv.h and intel_drv.h.

Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/8c197872384fc35442b738c21ba0da9336e02a85.1556809195.git.jani.nikula@intel.com
parent cb36330467
commit b30ed4cc2e
@@ -3357,50 +3357,6 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
 #define INTEL_BROADCAST_RGB_FULL 1
 #define INTEL_BROADCAST_RGB_LIMITED 2
 
-static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
-{
-	unsigned long j = msecs_to_jiffies(m);
-
-	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
-}
-
-static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
-{
-	/* nsecs_to_jiffies64() does not guard against overflow */
-	if (NSEC_PER_SEC % HZ &&
-	    div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ)
-		return MAX_JIFFY_OFFSET;
-
-	return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
-}
-
-/*
- * If you need to wait X milliseconds between events A and B, but event B
- * doesn't happen exactly after event A, you record the timestamp (jiffies) of
- * when event A happened, then just before event B you call this function and
- * pass the timestamp as the first argument, and X as the second argument.
- */
-static inline void
-wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
-{
-	unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;
-
-	/*
-	 * Don't re-read the value of "jiffies" every time since it may change
-	 * behind our back and break the math.
-	 */
-	tmp_jiffies = jiffies;
-	target_jiffies = timestamp_jiffies +
-			 msecs_to_jiffies_timeout(to_wait_ms);
-
-	if (time_after(target_jiffies, tmp_jiffies)) {
-		remaining_jiffies = target_jiffies - tmp_jiffies;
-		while (remaining_jiffies)
-			remaining_jiffies =
-				schedule_timeout_uninterruptible(remaining_jiffies);
-	}
-}
-
 void i915_memcpy_init_early(struct drm_i915_private *dev_priv);
 bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len);
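The block comment above spells out the intended calling pattern for wait_remaining_ms_from_jiffies(). A minimal caller sketch, with hypothetical names (panel_power_on(), do_other_setup_work(), panel_backlight_on() and the 100 ms window are illustrative, not part of this commit):

	/* Hypothetical caller: guarantee at least 100 ms between events A and B. */
	static void example_panel_enable(void)
	{
		unsigned long power_on_time;

		panel_power_on();		/* event A (hypothetical) */
		power_on_time = jiffies;	/* record A's timestamp */

		do_other_setup_work();		/* may consume part of the window */

		/* Sleeps only for whatever remains of the 100 ms window, if anything. */
		wait_remaining_ms_from_jiffies(power_on_time, 100);
		panel_backlight_on();		/* event B (hypothetical) */
	}

Note that msecs_to_jiffies_timeout() rounds up by one jiffy, so the resulting wait is guaranteed to be at least the requested duration.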
@@ -26,6 +26,7 @@
 #define __I915_UTILS_H
 
 #include <linux/list.h>
+#include <linux/sched.h>
 #include <linux/types.h>
 #include <linux/workqueue.h>
 
@@ -176,6 +177,158 @@ static inline void drain_delayed_work(struct delayed_work *dw)
 	} while (delayed_work_pending(dw));
 }
 
+static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
+{
+	unsigned long j = msecs_to_jiffies(m);
+
+	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
+}
+
+static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
+{
+	/* nsecs_to_jiffies64() does not guard against overflow */
+	if (NSEC_PER_SEC % HZ &&
+	    div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ)
+		return MAX_JIFFY_OFFSET;
+
+	return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
+}
+
+/*
+ * If you need to wait X milliseconds between events A and B, but event B
+ * doesn't happen exactly after event A, you record the timestamp (jiffies) of
+ * when event A happened, then just before event B you call this function and
+ * pass the timestamp as the first argument, and X as the second argument.
+ */
+static inline void
+wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
+{
+	unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;
+
+	/*
+	 * Don't re-read the value of "jiffies" every time since it may change
+	 * behind our back and break the math.
+	 */
+	tmp_jiffies = jiffies;
+	target_jiffies = timestamp_jiffies +
+			 msecs_to_jiffies_timeout(to_wait_ms);
+
+	if (time_after(target_jiffies, tmp_jiffies)) {
+		remaining_jiffies = target_jiffies - tmp_jiffies;
+		while (remaining_jiffies)
+			remaining_jiffies =
+				schedule_timeout_uninterruptible(remaining_jiffies);
+	}
+}
+
+/**
+ * __wait_for - magic wait macro
+ *
+ * Macro to help avoid open coding check/wait/timeout patterns. Note that it's
+ * important that we check the condition again after having timed out, since the
+ * timeout could be due to preemption or similar and we've never had a chance to
+ * check the condition before the timeout.
+ */
+#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
+	const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
+	long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \
+	int ret__; \
+	might_sleep(); \
+	for (;;) { \
+		const bool expired__ = ktime_after(ktime_get_raw(), end__); \
+		OP; \
+		/* Guarantee COND check prior to timeout */ \
+		barrier(); \
+		if (COND) { \
+			ret__ = 0; \
+			break; \
+		} \
+		if (expired__) { \
+			ret__ = -ETIMEDOUT; \
+			break; \
+		} \
+		usleep_range(wait__, wait__ * 2); \
+		if (wait__ < (Wmax)) \
+			wait__ <<= 1; \
+	} \
+	ret__; \
+})
+
+#define _wait_for(COND, US, Wmin, Wmax) __wait_for(, (COND), (US), (Wmin), \
+						   (Wmax))
+#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000)
+
+/* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */
+#if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT)
+# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) WARN_ON_ONCE((ATOMIC) && !in_atomic())
+#else
+# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) do { } while (0)
+#endif
+
+#define _wait_for_atomic(COND, US, ATOMIC) \
+({ \
+	int cpu, ret, timeout = (US) * 1000; \
+	u64 base; \
+	_WAIT_FOR_ATOMIC_CHECK(ATOMIC); \
+	if (!(ATOMIC)) { \
+		preempt_disable(); \
+		cpu = smp_processor_id(); \
+	} \
+	base = local_clock(); \
+	for (;;) { \
+		u64 now = local_clock(); \
+		if (!(ATOMIC)) \
+			preempt_enable(); \
+		/* Guarantee COND check prior to timeout */ \
+		barrier(); \
+		if (COND) { \
+			ret = 0; \
+			break; \
+		} \
+		if (now - base >= timeout) { \
+			ret = -ETIMEDOUT; \
+			break; \
+		} \
+		cpu_relax(); \
+		if (!(ATOMIC)) { \
+			preempt_disable(); \
+			if (unlikely(cpu != smp_processor_id())) { \
+				timeout -= now - base; \
+				cpu = smp_processor_id(); \
+				base = local_clock(); \
+			} \
+		} \
+	} \
+	ret; \
+})
+
+#define wait_for_us(COND, US) \
+({ \
+	int ret__; \
+	BUILD_BUG_ON(!__builtin_constant_p(US)); \
+	if ((US) > 10) \
+		ret__ = _wait_for((COND), (US), 10, 10); \
+	else \
+		ret__ = _wait_for_atomic((COND), (US), 0); \
+	ret__; \
+})
+
+#define wait_for_atomic_us(COND, US) \
+({ \
+	BUILD_BUG_ON(!__builtin_constant_p(US)); \
+	BUILD_BUG_ON((US) > 50000); \
+	_wait_for_atomic((COND), (US), 1); \
+})
+
+#define wait_for_atomic(COND, MS) wait_for_atomic_us((COND), (MS) * 1000)
+
+#define KHz(x) (1000 * (x))
+#define MHz(x) KHz(1000 * (x))
+
+#define KBps(x) (1000 * (x))
+#define MBps(x) KBps(1000 * (x))
+#define GBps(x) ((u64)1000 * MBps((x)))
+
 static inline const char *yesno(bool v)
 {
 	return v ? "yes" : "no";
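The __wait_for() kerneldoc above explains the check/wait/timeout pattern; the final COND check after expiry matters because the thread may have been preempted past the deadline before ever sampling COND. A minimal sketch of the sleeping wait_for() wrapper (EXAMPLE_STATUS_REG, READY_BIT and the regs pointer are hypothetical):

	/* Hypothetical: poll a status register for up to 10 ms, sleeping between reads. */
	static int example_wait_ready(void __iomem *regs)
	{
		int ret;

		ret = wait_for(readl(regs + EXAMPLE_STATUS_REG) & READY_BIT, 10);
		if (ret == -ETIMEDOUT)
			pr_err("example: device never became ready\n");

		return ret;
	}

The sleep starts at Wmin (10 us here) and doubles each iteration up to Wmax (1000 us), trading wakeup frequency against latency on longer waits.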
@@ -46,114 +46,6 @@
 
 struct drm_printer;
 
-/**
- * __wait_for - magic wait macro
- *
- * Macro to help avoid open coding check/wait/timeout patterns. Note that it's
- * important that we check the condition again after having timed out, since the
- * timeout could be due to preemption or similar and we've never had a chance to
- * check the condition before the timeout.
- */
-#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
-	const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
-	long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \
-	int ret__; \
-	might_sleep(); \
-	for (;;) { \
-		const bool expired__ = ktime_after(ktime_get_raw(), end__); \
-		OP; \
-		/* Guarantee COND check prior to timeout */ \
-		barrier(); \
-		if (COND) { \
-			ret__ = 0; \
-			break; \
-		} \
-		if (expired__) { \
-			ret__ = -ETIMEDOUT; \
-			break; \
-		} \
-		usleep_range(wait__, wait__ * 2); \
-		if (wait__ < (Wmax)) \
-			wait__ <<= 1; \
-	} \
-	ret__; \
-})
-
-#define _wait_for(COND, US, Wmin, Wmax) __wait_for(, (COND), (US), (Wmin), \
-						   (Wmax))
-#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000)
-
-/* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */
-#if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT)
-# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) WARN_ON_ONCE((ATOMIC) && !in_atomic())
-#else
-# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) do { } while (0)
-#endif
-
-#define _wait_for_atomic(COND, US, ATOMIC) \
-({ \
-	int cpu, ret, timeout = (US) * 1000; \
-	u64 base; \
-	_WAIT_FOR_ATOMIC_CHECK(ATOMIC); \
-	if (!(ATOMIC)) { \
-		preempt_disable(); \
-		cpu = smp_processor_id(); \
-	} \
-	base = local_clock(); \
-	for (;;) { \
-		u64 now = local_clock(); \
-		if (!(ATOMIC)) \
-			preempt_enable(); \
-		/* Guarantee COND check prior to timeout */ \
-		barrier(); \
-		if (COND) { \
-			ret = 0; \
-			break; \
-		} \
-		if (now - base >= timeout) { \
-			ret = -ETIMEDOUT; \
-			break; \
-		} \
-		cpu_relax(); \
-		if (!(ATOMIC)) { \
-			preempt_disable(); \
-			if (unlikely(cpu != smp_processor_id())) { \
-				timeout -= now - base; \
-				cpu = smp_processor_id(); \
-				base = local_clock(); \
-			} \
-		} \
-	} \
-	ret; \
-})
-
-#define wait_for_us(COND, US) \
-({ \
-	int ret__; \
-	BUILD_BUG_ON(!__builtin_constant_p(US)); \
-	if ((US) > 10) \
-		ret__ = _wait_for((COND), (US), 10, 10); \
-	else \
-		ret__ = _wait_for_atomic((COND), (US), 0); \
-	ret__; \
-})
-
-#define wait_for_atomic_us(COND, US) \
-({ \
-	BUILD_BUG_ON(!__builtin_constant_p(US)); \
-	BUILD_BUG_ON((US) > 50000); \
-	_wait_for_atomic((COND), (US), 1); \
-})
-
-#define wait_for_atomic(COND, MS) wait_for_atomic_us((COND), (MS) * 1000)
-
-#define KHz(x) (1000 * (x))
-#define MHz(x) KHz(1000 * (x))
-
-#define KBps(x) (1000 * (x))
-#define MBps(x) KBps(1000 * (x))
-#define GBps(x) ((u64)1000 * MBps((x)))
-
 /*
  * Display related stuff
  */
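These are the same macros removed from intel_drv.h after gaining a home in i915_utils.h. For contrast with the sleeping variant, a hypothetical atomic-context use (EXAMPLE_CTL_REG, EXAMPLE_ACK_REG, ACK_REQUEST, ACK_BIT and example_lock are illustrative); the atomic path busy-waits with cpu_relax() and never sleeps:

	/* Hypothetical: under a spinlock, busy-wait up to 50 us for an ack bit. */
	spin_lock_irq(&example_lock);
	writel(ACK_REQUEST, regs + EXAMPLE_CTL_REG);
	if (wait_for_atomic_us(readl(regs + EXAMPLE_ACK_REG) & ACK_BIT, 50))
		pr_err("example: ack timed out\n");
	spin_unlock_irq(&example_lock);

wait_for_atomic_us() requires a compile-time-constant timeout of at most 50000 us (the BUILD_BUG_ONs above), since busy-waiting longer than that in atomic context would be unreasonable.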