tick/rcu: Remove obsolete rcu_needs_cpu() parameters
With the removal of CONFIG_RCU_FAST_NO_HZ, the parameters in
rcu_needs_cpu() are not necessary anymore. Simply remove them.

Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Paul E. McKenney <paulmck@kernel.org>
Cc: Paul Menzel <pmenzel@molgen.mpg.de>
parent a1ff03cd6f
commit 2984539959
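For illustration only (not part of the commit): a minimal, compilable sketch of why the two parameters became dead weight once CONFIG_RCU_FAST_NO_HZ was gone. The helper name and the _old/_new suffixes are invented for this sketch; only the function shapes mirror the diff below.

/*
 * Illustrative sketch, not kernel code: without CONFIG_RCU_FAST_NO_HZ,
 * rcu_needs_cpu() always wrote KTIME_MAX to *nextevt and ignored basemono,
 * so the hint parameters carried no information for the caller.
 */
#include <stdbool.h>
#include <stdint.h>

#define KTIME_MAX INT64_MAX

/* Stand-in for the per-CPU callback-list check done by the real function. */
static bool cpu_has_pending_callbacks(void)
{
	return false;
}

/* Old shape: the next-event hint is unconditionally "no event". */
static int rcu_needs_cpu_old(uint64_t basemono, uint64_t *nextevt)
{
	(void)basemono;
	*nextevt = KTIME_MAX;
	return cpu_has_pending_callbacks();
}

/* New shape: a plain predicate, which is all the tick code needs. */
static int rcu_needs_cpu_new(void)
{
	return cpu_has_pending_callbacks();
}

int main(void)
{
	uint64_t unused;

	/* Both forms answer the same question for the idle tick path. */
	return rcu_needs_cpu_old(0, &unused) == rcu_needs_cpu_new() ? 0 : 1;
}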
include/linux/rcutiny.h
@@ -64,9 +64,8 @@ static inline void rcu_softirq_qs(void)
 		rcu_tasks_qs(current, (preempt)); \
 	} while (0)
 
-static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
+static inline int rcu_needs_cpu(void)
 {
-	*nextevt = KTIME_MAX;
 	return 0;
 }
 
include/linux/rcutree.h
@@ -19,7 +19,7 @@
 
 void rcu_softirq_qs(void);
 void rcu_note_context_switch(bool preempt);
-int rcu_needs_cpu(u64 basem, u64 *nextevt);
+int rcu_needs_cpu(void);
 void rcu_cpu_stall_reset(void);
 
 /*
kernel/rcu/tree.c
@@ -1086,9 +1086,8 @@ void rcu_irq_enter_irqson(void)
  * Just check whether or not this CPU has non-offloaded RCU callbacks
  * queued.
  */
-int rcu_needs_cpu(u64 basemono, u64 *nextevt)
+int rcu_needs_cpu(void)
 {
-	*nextevt = KTIME_MAX;
 	return !rcu_segcblist_empty(&this_cpu_ptr(&rcu_data)->cblist) &&
 		!rcu_rdp_is_offloaded(this_cpu_ptr(&rcu_data));
 }
kernel/time/tick-sched.c
@@ -785,7 +785,7 @@ static inline bool local_timer_softirq_pending(void)
 
 static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
 {
-	u64 basemono, next_tick, next_tmr, next_rcu, delta, expires;
+	u64 basemono, next_tick, delta, expires;
 	unsigned long basejiff;
 	unsigned int seq;
 
@@ -808,7 +808,7 @@ static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
 	 * minimal delta which brings us back to this place
 	 * immediately. Lather, rinse and repeat...
 	 */
-	if (rcu_needs_cpu(basemono, &next_rcu) || arch_needs_cpu() ||
+	if (rcu_needs_cpu() || arch_needs_cpu() ||
 	    irq_work_needs_cpu() || local_timer_softirq_pending()) {
 		next_tick = basemono + TICK_NSEC;
 	} else {
@@ -819,10 +819,8 @@ static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu)
 		 * disabled this also looks at the next expiring
 		 * hrtimer.
 		 */
-		next_tmr = get_next_timer_interrupt(basejiff, basemono);
-		ts->next_timer = next_tmr;
-		/* Take the next rcu event into account */
-		next_tick = next_rcu < next_tmr ? next_rcu : next_tmr;
+		next_tick = get_next_timer_interrupt(basejiff, basemono);
+		ts->next_timer = next_tick;
 	}
 
 	/*