@@ -95,7 +95,6 @@ extern unsigned fscache_debug;
extern struct kobject *fscache_root;
extern struct workqueue_struct *fscache_object_wq;
extern struct workqueue_struct *fscache_op_wq;
-DECLARE_PER_CPU(wait_queue_head_t, fscache_object_cong_wait);
extern unsigned int fscache_hash(unsigned int salt, unsigned int *data, unsigned int n);
@@ -41,8 +41,6 @@ struct kobject *fscache_root;
struct workqueue_struct *fscache_object_wq;
struct workqueue_struct *fscache_op_wq;
-DEFINE_PER_CPU(wait_queue_head_t, fscache_object_cong_wait);
-
/* these values serve as lower bounds, will be adjusted in fscache_init() */
static unsigned fscache_object_max_active = 4;
static unsigned fscache_op_max_active = 2;
@@ -139,7 +137,6 @@ unsigned int fscache_hash(unsigned int salt, unsigned int *data, unsigned int n)
static int __init fscache_init(void)
{
unsigned int nr_cpus = num_possible_cpus();
- unsigned int cpu;
int ret;
fscache_object_max_active =
@@ -162,9 +159,6 @@ static int __init fscache_init(void)
if (!fscache_op_wq)
goto error_op_wq;
- for_each_possible_cpu(cpu)
- init_waitqueue_head(&per_cpu(fscache_object_cong_wait, cpu));
-
ret = fscache_proc_init();
if (ret < 0)
goto error_proc;
@@ -807,6 +807,8 @@ void fscache_object_destroy(struct fscache_object *object)
}
EXPORT_SYMBOL(fscache_object_destroy);
+static DECLARE_WAIT_QUEUE_HEAD(fscache_object_cong_wait);
+
/*
* enqueue an object for metadata-type processing
*/
@@ -815,16 +817,12 @@ void fscache_enqueue_object(struct fscache_object *object)
_enter("{OBJ%x}", object->debug_id);
if (fscache_get_object(object, fscache_obj_get_queue) >= 0) {
- wait_queue_head_t *cong_wq =
- &get_cpu_var(fscache_object_cong_wait);
if (queue_work(fscache_object_wq, &object->work)) {
if (fscache_object_congested())
- wake_up(cong_wq);
+ wake_up(&fscache_object_cong_wait);
} else
fscache_put_object(object, fscache_obj_put_queue);
-
- put_cpu_var(fscache_object_cong_wait);
}
}
@@ -842,16 +840,15 @@ void fscache_enqueue_object(struct fscache_object *object)
*/
bool fscache_object_sleep_till_congested(signed long *timeoutp)
{
- wait_queue_head_t *cong_wq = this_cpu_ptr(&fscache_object_cong_wait);
DEFINE_WAIT(wait);
if (fscache_object_congested())
return true;
- add_wait_queue_exclusive(cong_wq, &wait);
+ add_wait_queue_exclusive(&fscache_object_cong_wait, &wait);
if (!fscache_object_congested())
*timeoutp = schedule_timeout(*timeoutp);
- finish_wait(cong_wq, &wait);
+ finish_wait(&fscache_object_cong_wait, &wait);
return fscache_object_congested();
}
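Taken together, the fscache hunks above (apparently against fs/fscache/internal.h, main.c and object.c) drop the per-CPU congestion wait queue and replace it with one file-local wait queue head. The waker no longer needs get_cpu_var()/put_cpu_var(), that is, a preempt-disabled region, around queue_work() and wake_up(), which is presumably the point on PREEMPT_RT, and a sleeper can no longer miss a wake-up issued from a different CPU's queue. Assembled from the hunks (blank lines and unrelated code elided), the post-patch paths read roughly:

static DECLARE_WAIT_QUEUE_HEAD(fscache_object_cong_wait);

void fscache_enqueue_object(struct fscache_object *object)
{
        _enter("{OBJ%x}", object->debug_id);

        if (fscache_get_object(object, fscache_obj_get_queue) >= 0) {
                if (queue_work(fscache_object_wq, &object->work)) {
                        if (fscache_object_congested())
                                wake_up(&fscache_object_cong_wait);
                } else
                        fscache_put_object(object, fscache_obj_put_queue);
        }
}

bool fscache_object_sleep_till_congested(signed long *timeoutp)
{
        DEFINE_WAIT(wait);

        if (fscache_object_congested())
                return true;

        add_wait_queue_exclusive(&fscache_object_cong_wait, &wait);
        if (!fscache_object_congested())
                *timeoutp = schedule_timeout(*timeoutp);
        finish_wait(&fscache_object_cong_wait, &wait);

        return fscache_object_congested();
}

The exclusive wait plus wake_up() pair still wakes at most one sleeper, exactly as before; only the granularity of the queue changes.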
@@ -229,7 +229,7 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset);
do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
# define might_sleep_no_state_check() \
- do { ___might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
+ do { ___might_sleep(__FILE__, __LINE__, 0); } while (0)
/**
* cant_sleep - annotation for functions that cannot sleep
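The might_sleep_no_state_check() change (in the RT-patched kernel.h, presumably) removes the might_resched() call, so the macro is now purely a debugging annotation: ___might_sleep() (three underscores, the variant that skips the task-state check) still emits the "sleeping function called from invalid context" diagnostics, but the macro no longer acts as a voluntary preemption point. might_sleep() itself, visible in the context line above, is unchanged. Side by side after the patch (the "# define might_sleep()" opener line is reconstructed, not part of the hunk):

# define might_sleep() \
        do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
# define might_sleep_no_state_check() \
        do { ___might_sleep(__FILE__, __LINE__, 0); } while (0)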
@@ -211,12 +211,12 @@ do { \
preempt_count_dec(); \
} while (0)
-#ifdef CONFIG_PREEMPT_RT
+#ifndef CONFIG_PREEMPT_RT
# define preempt_enable_no_resched() sched_preempt_enable_no_resched()
-# define preempt_check_resched_rt() preempt_check_resched()
+# define preempt_check_resched_rt() barrier();
#else
# define preempt_enable_no_resched() preempt_enable()
-# define preempt_check_resched_rt() barrier();
+# define preempt_check_resched_rt() preempt_check_resched()
#endif
#define preemptible() (preempt_count() == 0 && !irqs_disabled())
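In -rt66 the preempt_enable_no_resched() mapping was the wrong way around: the RT build got sched_preempt_enable_no_resched() while the non-RT build got a full preempt_enable(). This hunk flips the #ifdef to #ifndef and swaps the preempt_check_resched_rt() lines, so that after the change non-RT keeps the cheap variants (no-resched enable plus a bare barrier()) while PREEMPT_RT maps preempt_enable_no_resched() to preempt_enable(), since re-enabling preemption on RT must not skip a pending reschedule, and keeps preempt_check_resched_rt() as a real preempt_check_resched(). The effective block after the patch, assembled from the +/context lines:

#ifndef CONFIG_PREEMPT_RT
# define preempt_enable_no_resched()    sched_preempt_enable_no_resched()
# define preempt_check_resched_rt()     barrier();
#else /* CONFIG_PREEMPT_RT */
# define preempt_enable_no_resched()    preempt_enable()
# define preempt_check_resched_rt()     preempt_check_resched()
#endif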
@@ -769,7 +769,7 @@ config NUMA_BALANCING
bool "Memory placement aware NUMA scheduler"
depends on ARCH_SUPPORTS_NUMA_BALANCING
depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY
- depends on SMP && NUMA && MIGRATION
+ depends on SMP && NUMA && MIGRATION && !PREEMPT_RT
help
This option adds support for automatic NUMA aware memory/task placement.
The mechanism is quite primitive and is based on migrating memory when
@@ -2158,7 +2158,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
ktime_t to = NSEC_PER_SEC / HZ;
set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_hrtimeout(&to, HRTIMER_MODE_REL);
+ schedule_hrtimeout(&to, HRTIMER_MODE_REL_HARD);
continue;
}
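The wait_task_inactive() hunk switches the one-tick backoff sleep from a normal relative hrtimer to HRTIMER_MODE_REL_HARD. On PREEMPT_RT, hrtimers expire in softirq context unless they are explicitly marked hard; this sleep can be reached very early in boot (for example via kthread_bind()/wait_task_inactive() while per-CPU kthreads are being created), before ksoftirqd is available, in which case a soft-mode timeout would never fire. That appears to be the reason for the change. A minimal illustration of the pattern, not the scheduler code itself:

#include <linux/hrtimer.h>
#include <linux/sched.h>

/* One-tick uninterruptible sleep whose hrtimer expires from hard
 * interrupt context, so it also works on PREEMPT_RT before the
 * softirq machinery is up. */
static void one_tick_sleep(void)
{
        ktime_t to = NSEC_PER_SEC / HZ;

        set_current_state(TASK_UNINTERRUPTIBLE);
        schedule_hrtimeout(&to, HRTIMER_MODE_REL_HARD);
}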
@@ -1 +1 @@
--rt66
+-rt67
@@ -81,7 +81,7 @@
struct zsmalloc_handle {
unsigned long addr;
- struct mutex lock;
+ spinlock_t lock;
};
#define ZS_HANDLE_ALLOC_SIZE (sizeof(struct zsmalloc_handle))
@@ -368,7 +368,7 @@ static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp)
if (p) {
struct zsmalloc_handle *zh = p;
- mutex_init(&zh->lock);
+ spin_lock_init(&zh->lock);
}
#endif
return (unsigned long)p;
@@ -926,7 +926,7 @@ static inline int testpin_tag(unsigned long handle)
#ifdef CONFIG_PREEMPT_RT
struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
- return mutex_is_locked(&zh->lock);
+ return spin_is_locked(&zh->lock);
#else
return bit_spin_is_locked(HANDLE_PIN_BIT, (unsigned long *)handle);
#endif
@@ -937,7 +937,7 @@ static inline int trypin_tag(unsigned long handle)
#ifdef CONFIG_PREEMPT_RT
struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
- return mutex_trylock(&zh->lock);
+ return spin_trylock(&zh->lock);
#else
return bit_spin_trylock(HANDLE_PIN_BIT, (unsigned long *)handle);
#endif
@@ -948,7 +948,7 @@ static void pin_tag(unsigned long handle)
#ifdef CONFIG_PREEMPT_RT
struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
- return mutex_lock(&zh->lock);
+ return spin_lock(&zh->lock);
#else
bit_spin_lock(HANDLE_PIN_BIT, (unsigned long *)handle);
#endif
@@ -959,7 +959,7 @@ static void unpin_tag(unsigned long handle)
#ifdef CONFIG_PREEMPT_RT
struct zsmalloc_handle *zh = zs_get_pure_handle(handle);
- return mutex_unlock(&zh->lock);
+ return spin_unlock(&zh->lock);
#else
bit_spin_unlock(HANDLE_PIN_BIT, (unsigned long *)handle);
#endif