@@ -55,16 +55,7 @@ static struct uart_driver serial8250_reg;
static unsigned int skip_txen_test; /* force skip of txen test at init time */
-/*
- * On -rt we can have a more delays, and legitimately
- * so - so don't drop work spuriously and spam the
- * syslog:
- */
-#ifdef CONFIG_PREEMPT_RT
-# define PASS_LIMIT 1000000
-#else
-# define PASS_LIMIT 512
-#endif
+#define PASS_LIMIT 512
#include <asm/serial.h>
/*
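
The hunk above drops the RT-specific PASS_LIMIT bump, so an RT kernel now gives up after the same 512 passes as mainline. PASS_LIMIT bounds the 8250 shared-IRQ service loop; a rough sketch of the consumer, modeled on mainline serial8250_interrupt() (simplified, not the verbatim driver code):

	static irqreturn_t serial8250_irq_sketch(int irq, void *dev_id)
	{
		struct irq_info *i = dev_id;		/* ports sharing this IRQ line */
		struct list_head *l, *end = NULL;
		int pass_counter = 0, handled = 0;

		spin_lock(&i->lock);
		l = i->head;
		do {
			/* ... service the port at 'l'; on progress set
			 * handled = 1 and end = NULL, otherwise remember
			 * 'l' in 'end' so the loop can terminate ... */
			l = l->next;

			if (l == i->head && pass_counter++ > PASS_LIMIT) {
				/* suspected stuck IRQ: give up rather than spin forever */
				pr_err_ratelimited("serial8250: too much work for irq%d\n", irq);
				break;
			}
		} while (l != end);
		spin_unlock(&i->lock);

		return IRQ_RETVAL(handled);
	}
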
@@ -95,6 +95,7 @@
#include <linux/sched/stat.h>
#include <linux/posix-timers.h>
#include <trace/events/oom.h>
+#include <linux/swait.h>
#include "internal.h"
#include "fd.h"
@@ -1902,6 +1902,7 @@ static void cont_add(int ctx, int cpu, u32 caller_id, int facility, int level,
// but later continuations can add a newline.
if (flags & LOG_NEWLINE) {
c->flags |= LOG_NEWLINE;
+ cont_flush(ctx);
}
}
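
With the added cont_flush(), a continuation record is emitted the moment its terminating newline arrives instead of lingering until the next printk(). Seen from the caller's side this is the pr_cont() pattern (illustrative only):

	pr_info("thermal zones:");	/* opens the continuation buffer */
	pr_cont(" cpu");		/* appended, no LOG_NEWLINE yet */
	pr_cont(" gpu");
	pr_cont("\n");			/* LOG_NEWLINE: flushed right here now */
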
@@ -2746,6 +2747,7 @@ static int printk_kthread_func(void *data)
&len, printk_time);
console_lock();
+ console_may_schedule = 0;
call_console_drivers(master_seq, ext_text, ext_len, text, len,
msg->level, msg->facility);
if (len > 0 || ext_len > 0)
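
The printk kthread clears console_may_schedule right after acquiring console_lock(), so the console drivers cannot be rescheduled mid-output. The flag is consumed by console_conditional_schedule(); roughly, modeled on mainline kernel/printk/printk.c:

	/* sketch of what the cleared flag suppresses */
	void console_conditional_schedule(void)
	{
		if (console_may_schedule)
			cond_resched();
	}
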
@@ -1 +1 @@
--rt26
+-rt27
@@ -1635,7 +1635,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
enableirqs = true;
#ifdef CONFIG_PREEMPT_RT
- if (system_state > SYSTEM_BOOTING)
+ if (system_state > SYSTEM_BOOTING && system_state < SYSTEM_SUSPEND)
enableirqs = true;
#endif
if (enableirqs)
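
The widened test keeps interrupts disabled while the machine is suspending or resuming, where the allocator may legitimately run with IRQs off and re-enabling them would be fatal. The comparison is a window over enum system_states: everything strictly between SYSTEM_BOOTING and SYSTEM_SUSPEND passes. For reference, the 4.19-era ordering from include/linux/kernel.h (later kernels insert more states):

	enum system_states {
		SYSTEM_BOOTING,
		SYSTEM_SCHEDULING,	/* window starts after boot */
		SYSTEM_RUNNING,
		SYSTEM_HALT,
		SYSTEM_POWER_OFF,
		SYSTEM_RESTART,
		SYSTEM_SUSPEND,		/* excluded again: suspend/resume path */
	};
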
@@ -2382,9 +2382,6 @@ static void flush_all(struct kmem_cache *s)
for_each_online_cpu(cpu) {
struct slub_free_list *f;
- if (!has_cpu_slab(cpu, s))
- continue;
-
f = &per_cpu(slub_free_list, cpu);
raw_spin_lock_irq(&f->lock);
list_splice_init(&f->list, &tofree);
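
Dropping the has_cpu_slab() short-circuit matters because a CPU's slub_free_list can hold deferred frees even when that CPU has no active slab for this cache; skipping it would strand those objects. Self-contained, the unconditional drain now looks like this (slub_free_list and free_delayed() are RT-patch constructs, sketched from the hunk):

	static void drain_deferred_frees_sketch(void)
	{
		LIST_HEAD(tofree);
		int cpu;

		for_each_online_cpu(cpu) {
			struct slub_free_list *f = &per_cpu(slub_free_list, cpu);

			raw_spin_lock_irq(&f->lock);
			list_splice_init(&f->list, &tofree);
			raw_spin_unlock_irq(&f->lock);
		}
		free_delayed(&tofree);	/* actually free everything collected */
	}
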
@@ -2751,7 +2748,8 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
unsigned long tid;
if (IS_ENABLED(CONFIG_PREEMPT_RT) && IS_ENABLED(CONFIG_DEBUG_ATOMIC_SLEEP))
- WARN_ON_ONCE(!preemptible() && system_state >= SYSTEM_SCHEDULING);
+ WARN_ON_ONCE(!preemptible() &&
+ (system_state > SYSTEM_BOOTING && system_state < SYSTEM_SUSPEND));
s = slab_pre_alloc_hook(s, gfpflags);
if (!s)
@@ -3216,7 +3214,8 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
int i;
if (IS_ENABLED(CONFIG_PREEMPT_RT) && IS_ENABLED(CONFIG_DEBUG_ATOMIC_SLEEP))
- WARN_ON_ONCE(!preemptible() && system_state >= SYSTEM_SCHEDULING);
+ WARN_ON_ONCE(!preemptible() &&
+ (system_state > SYSTEM_BOOTING && system_state < SYSTEM_SUSPEND));
/* memcg and kmem_cache debug support */
s = slab_pre_alloc_hook(s, flags);
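
Both call sites (slab_alloc_node() and kmem_cache_alloc_bulk()) move from ">= SYSTEM_SCHEDULING" to the same bounded window, so the RT debug check no longer fires when the suspend/resume path allocates with preemption disabled. Expanded for readability, the assertion now reads:

	/* sketch: on RT, warn about non-preemptible allocations, but only
	 * while the system is fully up (not booting, not suspending) */
	if (IS_ENABLED(CONFIG_PREEMPT_RT) && IS_ENABLED(CONFIG_DEBUG_ATOMIC_SLEEP)) {
		bool fully_up = system_state > SYSTEM_BOOTING &&
				system_state < SYSTEM_SUSPEND;

		WARN_ON_ONCE(!preemptible() && fully_up);
	}
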
@@ -372,6 +372,8 @@ static struct zswap_entry *zswap_entry_find_get(struct rb_root *root,
* per-cpu code
**********************************/
static DEFINE_PER_CPU(u8 *, zswap_dstmem);
+/* Used for zswap_dstmem and tfm */
+static DEFINE_LOCAL_IRQ_LOCK(zswap_cpu_lock);
static int zswap_dstmem_prepare(unsigned int cpu)
{
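
DEFINE_LOCAL_IRQ_LOCK() comes from the RT patch's <linux/locallock.h> (not in mainline): on a non-RT build, local_lock()/local_unlock() collapse to preempt_disable()/preempt_enable(), while on RT they take a per-CPU spinlock, so the section stays preemptible yet access to per-CPU data remains serialized. The generic pattern, with a hypothetical scratch buffer:

	#include <linux/locallock.h>		/* RT-patch header */

	static DEFINE_LOCAL_IRQ_LOCK(demo_lock);
	static DEFINE_PER_CPU(u8 *, demo_buf);	/* hypothetical per-CPU buffer */

	static void demo_use(const u8 *src, size_t len)
	{
		u8 *dst;

		local_lock(demo_lock);		/* !RT: preempt_disable(); RT: per-CPU lock */
		dst = *this_cpu_ptr(&demo_buf);	/* no migration while the lock is held */
		memcpy(dst, src, len);
		local_unlock(demo_lock);
	}
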
@@ -889,10 +891,11 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
dlen = PAGE_SIZE;
src = (u8 *)zhdr + sizeof(struct zswap_header);
dst = kmap_atomic(page);
- tfm = *get_cpu_ptr(entry->pool->tfm);
+ local_lock(zswap_cpu_lock);
+ tfm = *this_cpu_ptr(entry->pool->tfm);
ret = crypto_comp_decompress(tfm, src, entry->length,
dst, &dlen);
- put_cpu_ptr(entry->pool->tfm);
+ local_unlock(zswap_cpu_lock);
kunmap_atomic(dst);
BUG_ON(ret);
BUG_ON(dlen != PAGE_SIZE);
@@ -981,8 +984,6 @@ static void zswap_fill_page(void *ptr, unsigned long value)
memset_l(page, value, PAGE_SIZE / sizeof(unsigned long));
}
-/* protect zswap_dstmem from concurrency */
-static DEFINE_LOCAL_IRQ_LOCK(zswap_dstmem_lock);
/*********************************
* frontswap hooks
**********************************/
@@ -1060,7 +1061,8 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
}
/* compress */
- dst = get_locked_var(zswap_dstmem_lock, zswap_dstmem);
+ local_lock(zswap_cpu_lock);
+ dst = *this_cpu_ptr(&zswap_dstmem);
tfm = *this_cpu_ptr(entry->pool->tfm);
src = kmap_atomic(page);
ret = crypto_comp_compress(tfm, src, PAGE_SIZE, dst, &dlen);
@@ -1088,7 +1090,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
memcpy(buf, &zhdr, hlen);
memcpy(buf + hlen, dst, dlen);
zpool_unmap_handle(entry->pool->zpool, handle);
- put_locked_var(zswap_dstmem_lock, zswap_dstmem);
+ local_unlock(zswap_cpu_lock);
/* populate entry */
entry->offset = offset;
@@ -1116,7 +1118,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
return 0;
put_dstmem:
- put_locked_var(zswap_dstmem_lock, zswap_dstmem);
+ local_unlock(zswap_cpu_lock);
zswap_pool_put(entry->pool);
freepage:
zswap_entry_cache_free(entry);
@@ -1161,9 +1163,10 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset,
if (zpool_evictable(entry->pool->zpool))
src += sizeof(struct zswap_header);
dst = kmap_atomic(page);
- tfm = *get_cpu_ptr(entry->pool->tfm);
+ local_lock(zswap_cpu_lock);
+ tfm = *this_cpu_ptr(entry->pool->tfm);
ret = crypto_comp_decompress(tfm, src, entry->length, dst, &dlen);
- put_cpu_ptr(entry->pool->tfm);
+ local_unlock(zswap_cpu_lock);
kunmap_atomic(dst);
zpool_unmap_handle(entry->pool->zpool, entry->handle);
BUG_ON(ret);
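
After these hunks, store, load and writeback all serialize on the single zswap_cpu_lock, which covers both zswap_dstmem and the per-CPU tfm (hence the relocated comment at its definition). The point of the conversion, side by side:

	/* before: get_cpu_ptr() disables preemption across the whole,
	 * potentially slow, crypto call; unacceptable on RT */
	tfm = *get_cpu_ptr(entry->pool->tfm);
	ret = crypto_comp_decompress(tfm, src, entry->length, dst, &dlen);
	put_cpu_ptr(entry->pool->tfm);

	/* after: the same per-CPU exclusion, but preemptible on RT */
	local_lock(zswap_cpu_lock);
	tfm = *this_cpu_ptr(entry->pool->tfm);
	ret = crypto_comp_decompress(tfm, src, entry->length, dst, &dlen);
	local_unlock(zswap_cpu_lock);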