@@ -111,6 +111,19 @@ static inline int hrtimer_clockid_to_base(clockid_t clock_id)
/*
+ * for_each_active_base: iterate over all active clock bases
+ * @_index: 'int' variable for internal purpose
+ * @_base: holds pointer to an active clock base
+ * @_cpu_base: cpu base to iterate on
+ * @_active_bases: 'unsigned int' variable for internal purpose
+ */
+#define for_each_active_base(_index, _base, _cpu_base, _active_bases)	\
+	for ((_active_bases) = (_cpu_base)->active_bases;		\
+	     ((_index) = ffs(_active_bases)) &&				\
+	     (((_base) = (_cpu_base)->clock_base + (_index) - 1), 1);	\
+	     (_active_bases) &= ~(1U << ((_index) - 1)))
+
+/*
* Get the coarse grained time at the softirq based on xtime and
* wall_to_monotonic.
*/
@@ -443,19 +456,15 @@ static inline void debug_deactivate(struct hrtimer *timer)
#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
{
- struct hrtimer_clock_base *base = cpu_base->clock_base;
+ struct hrtimer_clock_base *base;
ktime_t expires, expires_next = { .tv64 = KTIME_MAX };
+ struct hrtimer *timer;
+ unsigned int active_bases;
int i;
- for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
- struct timerqueue_node *next;
- struct hrtimer *timer;
-
- next = timerqueue_getnext(&base->active);
- if (!next)
- continue;
-
- timer = container_of(next, struct hrtimer, node);
+ for_each_active_base(i, base, cpu_base, active_bases) {
+ timer = container_of(timerqueue_getnext(&base->active),
+ struct hrtimer, node);
expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
if (expires.tv64 < expires_next.tv64)
expires_next = expires;
@@ -1245,6 +1254,8 @@ void hrtimer_interrupt(struct clock_event_device *dev)
{
struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
ktime_t expires_next, now, entry_time, delta;
+ struct hrtimer_clock_base *base;
+ unsigned int active_bases;
int i, retries = 0;
BUG_ON(!cpu_base->hres_active);
@@ -1264,15 +1275,10 @@ void hrtimer_interrupt(struct clock_event_device *dev)
*/
cpu_base->expires_next.tv64 = KTIME_MAX;
- for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
- struct hrtimer_clock_base *base;
+ for_each_active_base(i, base, cpu_base, active_bases) {
struct timerqueue_node *node;
ktime_t basenow;
- if (!(cpu_base->active_bases & (1 << i)))
- continue;
-
- base = cpu_base->clock_base + i;
basenow = ktime_add(now, base->offset);
while ((node = timerqueue_getnext(&base->active))) {
@@ -1435,16 +1441,13 @@ void hrtimer_run_queues(void)
struct timerqueue_node *node;
struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
struct hrtimer_clock_base *base;
+ unsigned int active_bases;
int index, gettime = 1;
if (hrtimer_hres_active())
return;
- for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
- base = &cpu_base->clock_base[index];
- if (!timerqueue_getnext(&base->active))
- continue;
-
+ for_each_active_base(index, base, cpu_base, active_bases) {
if (gettime) {
hrtimer_get_softirq_time(cpu_base);
gettime = 0;
@@ -1665,6 +1668,8 @@ static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
static void migrate_hrtimers(int scpu)
{
struct hrtimer_cpu_base *old_base, *new_base;
+ struct hrtimer_clock_base *clock_base;
+ unsigned int active_bases;
int i;
BUG_ON(cpu_online(scpu));
@@ -1680,9 +1685,9 @@ static void migrate_hrtimers(int scpu)
raw_spin_lock(&new_base->lock);
raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
- for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
- migrate_hrtimer_list(&old_base->clock_base[i],
- &new_base->clock_base[i]);
+ for_each_active_base(i, clock_base, old_base, active_bases) {
+ migrate_hrtimer_list(clock_base,
+ &new_base->clock_base[clock_base->index]);
}
raw_spin_unlock(&old_base->lock);