@@ -75,12 +75,11 @@ void hv_remove_vmbus_handler(void)
DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_stimer0)
{
struct pt_regs *old_regs = set_irq_regs(regs);
- u64 ip = regs ? instruction_pointer(regs) : 0;
inc_irq_stat(hyperv_stimer0_count);
if (hv_stimer0_handler)
hv_stimer0_handler();
- add_interrupt_randomness(HYPERV_STIMER0_VECTOR, 0, ip);
+ add_interrupt_randomness(HYPERV_STIMER0_VECTOR);
ack_APIC_irq();
set_irq_regs(old_regs);
@@ -200,7 +200,7 @@
* void add_device_randomness(const void *buf, unsigned int size);
* void add_input_randomness(unsigned int type, unsigned int code,
* unsigned int value);
- * void add_interrupt_randomness(int irq, int irq_flags);
+ * void add_interrupt_randomness(int irq);
* void add_disk_randomness(struct gendisk *disk);
*
* add_device_randomness() is for adding data to the random pool that
@@ -1242,55 +1242,97 @@ static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
return *ptr;
}
-void add_interrupt_randomness(int irq, int irq_flags, __u64 ip)
+static bool process_interrupt_randomness_pool(struct fast_pool *fast_pool)
{
struct entropy_store *r;
+
+ if (unlikely(crng_init == 0)) {
+ bool pool_reset = false;
+
+ if ((fast_pool->count >= 64) &&
+ crng_fast_load((char *) fast_pool->pool,
+ sizeof(fast_pool->pool)))
+ pool_reset = true;
+
+ return pool_reset;
+ }
+
+ if ((fast_pool->count < 64) &&
+ !time_after(jiffies, fast_pool->last + HZ))
+ return false;
+
+ r = &input_pool;
+ if (!spin_trylock(&r->lock))
+ return false;
+
+ __mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool));
+ spin_unlock(&r->lock);
+
+ /* award one bit for the contents of the fast pool */
+ credit_entropy_bits(r, 1);
+ return true;
+}
+
+#ifdef CONFIG_PREEMPT_RT
+void process_interrupt_randomness(void)
+{
+ struct fast_pool *cpu_pool;
+ struct fast_pool fast_pool;
+
+ lockdep_assert_irqs_enabled();
+
+ migrate_disable();
+ cpu_pool = this_cpu_ptr(&irq_randomness);
+
+ local_irq_disable();
+ memcpy(&fast_pool, cpu_pool, sizeof(fast_pool));
+ local_irq_enable();
+
+ if (process_interrupt_randomness_pool(&fast_pool)) {
+ local_irq_disable();
+ cpu_pool->last = jiffies;
+ cpu_pool->count = 0;
+ local_irq_enable();
+ }
+ memzero_explicit(&fast_pool, sizeof(fast_pool));
+ migrate_enable();
+}
+#endif
+
+void add_interrupt_randomness(int irq)
+{
struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
+ struct pt_regs *regs = get_irq_regs();
unsigned long now = jiffies;
cycles_t cycles = random_get_entropy();
__u32 c_high, j_high;
+ __u64 ip;
if (cycles == 0)
- cycles = get_reg(fast_pool, NULL);
+ cycles = get_reg(fast_pool, regs);
c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
j_high = (sizeof(now) > 4) ? now >> 32 : 0;
fast_pool->pool[0] ^= cycles ^ j_high ^ irq;
fast_pool->pool[1] ^= now ^ c_high;
- if (!ip)
- ip = _RET_IP_;
+ ip = regs ? instruction_pointer(regs) : _RET_IP_;
fast_pool->pool[2] ^= ip;
fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 :
- get_reg(fast_pool, NULL);
+ get_reg(fast_pool, regs);
fast_mix(fast_pool);
add_interrupt_bench(cycles);
- if (unlikely(crng_init == 0)) {
- if ((fast_pool->count >= 64) &&
- crng_fast_load((char *) fast_pool->pool,
- sizeof(fast_pool->pool))) {
- fast_pool->count = 0;
+ /*
+ * On PREEMPT_RT the entropy cannot be fed into the input_pool because
+ * it needs to acquire sleeping locks with interrupts disabled.
+ * This is deferred to the threaded handler.
+ */
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
+ if (process_interrupt_randomness_pool(fast_pool)) {
fast_pool->last = now;
+ fast_pool->count = 0;
}
- return;
}
-
- if ((fast_pool->count < 64) &&
- !time_after(now, fast_pool->last + HZ))
- return;
-
- r = &input_pool;
- if (!spin_trylock(&r->lock))
- return;
-
- fast_pool->last = now;
- __mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool));
- spin_unlock(&r->lock);
-
- fast_pool->count = 0;
-
- /* award one bit for the contents of the fast pool */
- credit_entropy_bits(r, 1);
}
EXPORT_SYMBOL_GPL(add_interrupt_randomness);
@@ -212,7 +212,7 @@ static inline void intel_context_enter(struct intel_context *ce)
static inline void intel_context_mark_active(struct intel_context *ce)
{
lockdep_assert(lockdep_is_held(&ce->timeline->mutex) ||
- test_bit(CONTEXT_IS_PARKED, &ce->flags));
+ test_bit(CONTEXT_IS_PARKING, &ce->flags));
++ce->active_count;
}
@@ -118,7 +118,7 @@ struct intel_context {
#define CONTEXT_LRCA_DIRTY 9
#define CONTEXT_GUC_INIT 10
#define CONTEXT_PERMA_PIN 11
-#define CONTEXT_IS_PARKED 12
+#define CONTEXT_IS_PARKING 12
struct {
u64 timeout_us;
@@ -180,7 +180,7 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
* engine->wakeref.count, we may see the request completion and retire
* it causing an underflow of the engine->wakeref.
*/
- set_bit(CONTEXT_IS_PARKED, &ce->flags);
+ set_bit(CONTEXT_IS_PARKING, &ce->flags);
GEM_BUG_ON(atomic_read(&ce->timeline->active_count) < 0);
rq = __i915_request_create(ce, GFP_NOWAIT);
@@ -212,7 +212,7 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
result = false;
out_unlock:
- clear_bit(CONTEXT_IS_PARKED, &ce->flags);
+ clear_bit(CONTEXT_IS_PARKING, &ce->flags);
return result;
}
@@ -643,7 +643,7 @@ i915_request_timeline(const struct i915_request *rq)
/* Valid only while the request is being constructed (or retired). */
return rcu_dereference_protected(rq->timeline,
lockdep_is_held(&rcu_access_pointer(rq->timeline)->mutex) ||
- test_bit(CONTEXT_IS_PARKED, &rq->context->flags));
+ test_bit(CONTEXT_IS_PARKING, &rq->context->flags));
}
static inline struct i915_gem_context *
@@ -19,7 +19,6 @@
#include <linux/atomic.h>
#include <linux/hyperv.h>
#include <linux/interrupt.h>
-#include <linux/irq.h>
#include "hv_trace.h"
@@ -22,7 +22,6 @@
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <linux/sched/task_stack.h>
-#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/notifier.h>
@@ -1338,8 +1337,6 @@ static void vmbus_isr(void)
void *page_addr = hv_cpu->synic_event_page;
struct hv_message *msg;
union hv_synic_event_flags *event;
- struct pt_regs *regs = get_irq_regs();
- u64 ip = regs ? instruction_pointer(regs) : 0;
bool handled = false;
if (unlikely(page_addr == NULL))
@@ -1384,7 +1381,7 @@ static void vmbus_isr(void)
tasklet_schedule(&hv_cpu->msg_dpc);
}
- add_interrupt_randomness(vmbus_interrupt, 0, ip);
+ add_interrupt_randomness(vmbus_interrupt);
}
static irqreturn_t vmbus_percpu_isr(int irq, void *dev_id)
@@ -68,7 +68,6 @@ struct irq_desc {
unsigned int irqs_unhandled;
atomic_t threads_handled;
int threads_handled_last;
- u64 random_ip;
raw_spinlock_t lock;
struct cpumask *percpu_enabled;
const struct cpumask *percpu_affinity;
@@ -35,7 +35,8 @@ static inline void add_latent_entropy(void) {}
extern void add_input_randomness(unsigned int type, unsigned int code,
unsigned int value) __latent_entropy;
-extern void add_interrupt_randomness(int irq, int irq_flags, __u64 ip) __latent_entropy;
+extern void add_interrupt_randomness(int irq) __latent_entropy;
+extern void process_interrupt_randomness(void);
extern void get_random_bytes(void *buf, int nbytes);
extern int wait_for_random_bytes(void);
@@ -66,7 +66,7 @@
#include <linux/seqlock.h>
struct u64_stats_sync {
-#if BITS_PER_LONG==32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
+#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
seqcount_t seq;
#endif
};
@@ -575,8 +575,6 @@ EXPORT_SYMBOL_GPL(handle_simple_irq);
*/
void handle_untracked_irq(struct irq_desc *desc)
{
- unsigned int flags = 0;
-
raw_spin_lock(&desc->lock);
if (!irq_may_run(desc))
@@ -593,7 +591,7 @@ void handle_untracked_irq(struct irq_desc *desc)
irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
raw_spin_unlock(&desc->lock);
- __handle_irq_event_percpu(desc, &flags);
+ __handle_irq_event_percpu(desc);
raw_spin_lock(&desc->lock);
irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
@@ -136,7 +136,7 @@ void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action)
wake_up_process(action->thread);
}
-irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc, unsigned int *flags)
+irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc)
{
irqreturn_t retval = IRQ_NONE;
unsigned int irq = desc->irq_data.irq;
@@ -174,10 +174,6 @@ irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc, unsigned int *flags
}
__irq_wake_thread(desc, action);
-
- fallthrough; /* to add to randomness */
- case IRQ_HANDLED:
- *flags |= action->flags;
break;
default:
@@ -192,18 +188,11 @@ irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc, unsigned int *flags
irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
{
- struct pt_regs *regs = get_irq_regs();
- u64 ip = regs ? instruction_pointer(regs) : 0;
- unsigned int flags = 0;
irqreturn_t retval;
- retval = __handle_irq_event_percpu(desc, &flags);
+ retval = __handle_irq_event_percpu(desc);
-#ifdef CONFIG_PREEMPT_RT
- desc->random_ip = ip;
-#else
- add_interrupt_randomness(desc->irq_data.irq, flags, ip);
-#endif
+ add_interrupt_randomness(desc->irq_data.irq);
if (!irq_settings_no_debug(desc))
note_interrupt(desc, retval);
@@ -103,7 +103,7 @@ extern int __irq_get_irqchip_state(struct irq_data *data,
extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
-irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc, unsigned int *flags);
+irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc);
irqreturn_t handle_irq_event_percpu(struct irq_desc *desc);
irqreturn_t handle_irq_event(struct irq_desc *desc);
@@ -1281,12 +1281,9 @@ static int irq_thread(void *data)
if (action_ret == IRQ_WAKE_THREAD)
irq_wake_secondary(desc, action);
- if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
- migrate_disable();
- add_interrupt_randomness(action->irq, 0,
- desc->random_ip ^ (unsigned long) action);
- migrate_enable();
- }
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
+ process_interrupt_randomness();
+
wake_threads_waitq(desc);
}
@@ -538,28 +538,9 @@ void oops_enter(void)
trigger_all_cpu_backtrace();
}
-/*
- * 64-bit random ID for oopses:
- */
-static u64 oops_id;
-
-static int init_oops_id(void)
-{
-#ifndef CONFIG_PREEMPT_RT
- if (!oops_id)
- get_random_bytes(&oops_id, sizeof(oops_id));
- else
-#endif
- oops_id++;
-
- return 0;
-}
-late_initcall(init_oops_id);
-
static void print_oops_end_marker(void)
{
- init_oops_id();
- pr_warn("---[ end trace %016llx ]---\n", (unsigned long long)oops_id);
+ pr_warn("---[ end trace %016llx ]---\n", 0ULL);
pr_flush(1000, true);
}
@@ -1 +1 @@
--rt9
+-rt10
@@ -3838,12 +3838,12 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
* separate lock before trying to get qdisc main lock.
* This permits qdisc->running owner to get the lock more
* often and dequeue packets faster.
+ * On PREEMPT_RT it is possible to preempt the qdisc owner during xmit
+ * and then other tasks will only enqueue packets. The packets will be
+ * sent after the qdisc owner is scheduled again. To prevent this
+ * scenario, the task always serializes on the lock.
*/
-#ifdef CONFIG_PREEMPT_RT
- contended = true;
-#else
- contended = qdisc_is_running(q);
-#endif
+ contended = IS_ENABLED(CONFIG_PREEMPT_RT) || qdisc_is_running(q);
if (unlikely(contended))
spin_lock(&q->busylock);