@@ -57,6 +57,7 @@ static inline int rwsem_is_contended(struct rw_semaphore *sem)
}
extern void __down_read(struct rw_semaphore *sem);
+extern int __down_read_interruptible(struct rw_semaphore *sem);
extern int __down_read_killable(struct rw_semaphore *sem);
extern int __down_read_trylock(struct rw_semaphore *sem);
extern void __down_write(struct rw_semaphore *sem);
@@ -154,14 +154,12 @@ static inline void tracing_generic_entry_update(struct trace_entry *entry,
unsigned short type,
unsigned int trace_ctx)
{
- struct task_struct *tsk = current;
-
entry->preempt_count = trace_ctx & 0xff;
entry->migrate_disable = (trace_ctx >> 8) & 0xff;
entry->preempt_lazy_count = (trace_ctx >> 16) & 0xff;
- entry->pid = (tsk) ? tsk->pid : 0;
+ entry->pid = current->pid;
entry->type = type;
entry->flags = trace_ctx >> 24;
}
unsigned int _tracing_gen_ctx_flags(unsigned long irqflags);
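The hunk above merges irqflags and the preempt counter into a single packed trace_ctx word. The layout implied by the shifts in tracing_generic_entry_update() is sketched below; pack_trace_ctx() is a hypothetical helper shown only to illustrate the encoding, it is not part of this patch.

	/*
	 * trace_ctx layout decoded by tracing_generic_entry_update():
	 *   bits  0- 7  preempt_count
	 *   bits  8-15  migrate_disable
	 *   bits 16-23  preempt_lazy_count
	 *   bits 24-31  trace flags (TRACE_FLAG_*)
	 */
	static inline unsigned int pack_trace_ctx(unsigned int pc, unsigned int migrate,
						  unsigned int lazy, unsigned int flags)
	{
		return (pc & 0xff) | ((migrate & 0xff) << 8) |
		       ((lazy & 0xff) << 16) | ((flags & 0xff) << 24);
	}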
@@ -256,7 +254,7 @@ void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
struct trace_event_file *trace_file,
unsigned long len);
-void trace_event_buffer_commit__(struct trace_event_buffer *fbuffer);
+void trace_event_buffer_commit(struct trace_event_buffer *fbuffer);
enum {
TRACE_EVENT_FL_FILTERED_BIT,
@@ -694,7 +694,7 @@ trace_event_raw_event_##call(void *__data, proto) \
\
{ assign; } \
\
- trace_event_buffer_commit__(&fbuffer); \
+ trace_event_buffer_commit(&fbuffer); \
}
/*
* The ftrace_test_probe is compiled out, it is only here as a build time check
@@ -164,6 +164,17 @@ void __down_read(struct rw_semaphore *sem)
WARN_ON_ONCE(ret);
}
+int __down_read_interruptible(struct rw_semaphore *sem)
+{
+ int ret;
+
+ ret = __down_read_common(sem, TASK_INTERRUPTIBLE);
+ if (likely(!ret))
+ return ret;
+ WARN_ONCE(ret != -EINTR, "Unexpected state: %d\n", ret);
+ return -EINTR;
+}
+
int __down_read_killable(struct rw_semaphore *sem)
{
int ret;
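The new __down_read_interruptible() mirrors __down_read_killable() directly below it, substituting TASK_INTERRUPTIBLE and normalizing any unexpected error to -EINTR. Assuming the usual non-underscore down_read_interruptible() wrapper exists around this primitive, a caller would propagate the error as in the sketch below; fetch_data() and its semaphore are illustrative only.

	/* sketch: handling a signal that arrives while blocked on the read lock */
	static int fetch_data(struct rw_semaphore *sem)
	{
		int err = down_read_interruptible(sem);

		if (err)
			return err;	/* -EINTR: interrupted by a signal */
		/* ... read-side critical section ... */
		up_read(sem);
		return 0;
	}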
@@ -2587,7 +2587,7 @@ static unsigned short migration_disable_value(void)
#endif
}
-static unsigned int __tracing_gen_ctx_flags(unsigned long irqflags)
+unsigned int _tracing_gen_ctx_flags(unsigned long irqflags)
{
unsigned int trace_flags = 0;
unsigned int pc;
@@ -2598,21 +2598,16 @@ static unsigned int __tracing_gen_ctx_flags(unsigned long irqflags)
if (irqs_disabled_flags(irqflags))
trace_flags |= TRACE_FLAG_IRQS_OFF;
#else
trace_flags |= TRACE_FLAG_IRQS_NOSUPPORT;
#endif
if (pc & NMI_MASK)
trace_flags |= TRACE_FLAG_NMI;
if (pc & HARDIRQ_MASK)
trace_flags |= TRACE_FLAG_HARDIRQ;
- if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
- if (in_serving_softirq())
- trace_flags |= TRACE_FLAG_SOFTIRQ;
- } else {
- if (pc & SOFTIRQ_OFFSET)
- trace_flags |= TRACE_FLAG_SOFTIRQ;
- }
+ if (in_serving_softirq())
+ trace_flags |= TRACE_FLAG_SOFTIRQ;
if (tif_need_resched())
trace_flags |= TRACE_FLAG_NEED_RESCHED;
if (test_preempt_need_resched())
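Dropping the PREEMPT_RT special case works because on !PREEMPT_RT the new test reduces to the old preempt-count check, while on PREEMPT_RT the softirq count is tracked per task rather than in preempt_count(), so only in_serving_softirq() is right in both configurations. A simplified restatement (based on the generic preempt.h definitions, not code from this patch):

	/* what the removed !PREEMPT_RT branch tested: */
	if (pc & SOFTIRQ_OFFSET)
		trace_flags |= TRACE_FLAG_SOFTIRQ;
	/* in_serving_softirq() expands to (softirq_count() & SOFTIRQ_OFFSET),
	 * which is the same test on !PREEMPT_RT and remains correct on
	 * PREEMPT_RT, where softirq_count() lives outside preempt_count().
	 */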
@@ -2624,11 +2619,6 @@ static unsigned int __tracing_gen_ctx_flags(unsigned long irqflags)
(trace_flags << 24);
}
-unsigned int _tracing_gen_ctx_flags(unsigned long irqflags)
-{
- return __tracing_gen_ctx_flags(irqflags);
-}
-
unsigned int tracing_gen_ctx_flags(void)
{
unsigned long irqflags;
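With the former static helper renamed and made global as _tracing_gen_ctx_flags(), the one-line wrapper of the same name becomes redundant and is deleted. The body of tracing_gen_ctx_flags() is truncated in this hunk; from the visible local and the removed wrapper it presumably reads roughly:

	/* presumed shape, inferred from this hunk; not shown in the diff */
	unsigned int tracing_gen_ctx_flags(void)
	{
		unsigned long irqflags;

		local_save_flags(irqflags);
		return _tracing_gen_ctx_flags(irqflags);
	}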
@@ -2890,7 +2880,7 @@ int tracepoint_printk_sysctl(struct ctl_table *table, int write,
return ret;
}
-void trace_event_buffer_commit__(struct trace_event_buffer *fbuffer)
+void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
{
if (static_key_false(&tracepoint_printk_key.key))
output_printk(fbuffer);
@@ -2901,7 +2891,7 @@ void trace_event_buffer_commit__(struct trace_event_buffer *fbuffer)
fbuffer->event, fbuffer->entry,
fbuffer->trace_ctx, fbuffer->regs);
}
-EXPORT_SYMBOL_GPL(trace_event_buffer_commit__);
+EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
/*
* Skip 3:
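The double-underscore suffix comes off trace_event_buffer_commit() here and at every call site in the hunks below. The reserve/fill/commit pairing is unchanged; a minimal sketch (the entry type and field are illustrative, not from this patch):

	entry = trace_event_buffer_reserve(&fbuffer, trace_file, sizeof(*entry));
	if (!entry)
		return;
	entry->ip = ip;				/* fill illustrative event data */
	trace_event_buffer_commit(&fbuffer);	/* was trace_event_buffer_commit__() */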
@@ -3092,13 +3082,9 @@ void __trace_stack(struct trace_array *tr, unsigned int trace_ctx,
*/
void trace_dump_stack(int skip)
{
- unsigned long flags;
-
if (tracing_disabled || tracing_selftest_running)
return;
- local_save_flags(flags);
-
#ifndef CONFIG_UNWINDER_ORC
/* Skip 1 to skip this function. */
skip++;
@@ -37,7 +37,7 @@ probe_likely_condition(struct ftrace_likely_data *f, int val, int expect)
struct ring_buffer_event *event;
struct trace_branch *entry;
unsigned long flags;
- unsigned int pc;
+ unsigned int trace_ctx;
const char *p;
if (current->trace_recursion & TRACE_BRANCH_BIT)
@@ -3677,7 +3677,6 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip,
struct trace_buffer *buffer;
struct ring_buffer_event *event;
struct ftrace_entry *entry;
- unsigned long flags;
unsigned int trace_ctx;
long disabled;
int cpu;
@@ -3690,8 +3689,6 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip,
if (disabled != 1)
goto out;
- local_save_flags(flags);
-
event = trace_event_buffer_lock_reserve(&buffer, &event_trace_file,
TRACE_FN, sizeof(*entry),
trace_ctx);
@@ -25,7 +25,7 @@ trace_inject_entry(struct trace_event_file *file, void *rec, int len)
if (entry) {
memcpy(entry, rec, len);
written = len;
- trace_event_buffer_commit__(&fbuffer);
+ trace_event_buffer_commit(&fbuffer);
}
rcu_read_unlock_sched();
@@ -504,7 +504,7 @@ static notrace void trace_event_raw_event_synth(void *__data,
}
}
- trace_event_buffer_commit__(&fbuffer);
+ trace_event_buffer_commit(&fbuffer);
out:
ring_buffer_nest_end(buffer);
}
@@ -1494,7 +1494,7 @@ __synth_event_trace_start(struct trace_event_file *file,
static inline void
__synth_event_trace_end(struct synth_event_trace_state *trace_state)
{
- trace_event_buffer_commit__(&trace_state->fbuffer);
+ trace_event_buffer_commit(&trace_state->fbuffer);
ring_buffer_nest_end(trace_state->buffer);
}
@@ -185,7 +185,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
unsigned long flags;
long disabled;
int cpu;
- int trace_ctx;
+ unsigned int trace_ctx;
if (unlikely(!tr->function_enabled))
return;
@@ -200,7 +200,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1)) {
- trace_ctx = tracing_gen_ctx_flags();
+ trace_ctx = _tracing_gen_ctx_flags(flags);
trace_function(tr, ip, parent_ip, trace_ctx);
__trace_stack(tr, trace_ctx, STACK_SKIP);
}
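Deriving trace_ctx from the flags saved earlier in this function matters: interrupts are already disabled at this point, so re-sampling via tracing_gen_ctx_flags() would always record IRQS_OFF, whereas the earlier snapshot preserves the caller's real irq state. The general pattern, with illustrative locals:

	unsigned long flags;
	unsigned int trace_ctx;

	local_irq_save(flags);			   /* snapshot, then disable irqs */
	trace_ctx = _tracing_gen_ctx_flags(flags); /* decode the pre-disable state */
	/* ... tracing work with irqs off ... */
	local_irq_restore(flags);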
@@ -405,10 +405,8 @@ ftrace_traceoff(unsigned long ip, unsigned long parent_ip,
static __always_inline void trace_stack(struct trace_array *tr)
{
- unsigned long flags;
unsigned int trace_ctx;
- local_save_flags(flags);
trace_ctx = tracing_gen_ctx_flags();
__trace_stack(tr, trace_ctx, FTRACE_STACK_SKIP);
@@ -1402,7 +1402,7 @@ __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
entry->ip = (unsigned long)tk->rp.kp.addr;
store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
- trace_event_buffer_commit__(&fbuffer);
+ trace_event_buffer_commit(&fbuffer);
}
static void
@@ -1449,7 +1449,7 @@ __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
entry->ret_ip = (unsigned long)ri->ret_addr;
store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
- trace_event_buffer_commit__(&fbuffer);
+ trace_event_buffer_commit(&fbuffer);
}
static void
@@ -116,7 +116,6 @@ static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
{
struct trace_array *tr = wakeup_trace;
struct trace_array_cpu *data;
- unsigned long flags;
unsigned int trace_ctx;
int ret = 0;
@@ -135,7 +134,6 @@ static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
return 0;
- local_save_flags(flags);
ret = __trace_graph_entry(tr, trace, trace_ctx);
atomic_dec(&data->disabled);
preempt_enable_notrace();
@@ -147,7 +145,6 @@ static void wakeup_graph_return(struct ftrace_graph_ret *trace)
{
struct trace_array *tr = wakeup_trace;
struct trace_array_cpu *data;
- unsigned long flags;
unsigned int trace_ctx;
ftrace_graph_addr_finish(trace);
@@ -155,7 +152,6 @@ static void wakeup_graph_return(struct ftrace_graph_ret *trace)
if (!func_prolog_preempt_disable(tr, &data, &trace_ctx))
return;
- local_save_flags(flags);
__trace_graph_return(tr, trace, trace_ctx);
atomic_dec(&data->disabled);
@@ -456,8 +452,6 @@ probe_wakeup_sched_switch(void *ignore, bool preempt,
if (next != wakeup_task)
return;
- trace_ctx = tracing_gen_ctx_flags();
-
/* disable local data, not wakeup_cpu data */
cpu = raw_smp_processor_id();
disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
@@ -465,6 +459,8 @@ probe_wakeup_sched_switch(void *ignore, bool preempt,
goto out;
local_irq_save(flags);
+ trace_ctx = _tracing_gen_ctx_flags(flags);
+
arch_spin_lock(&wakeup_lock);
/* We could race with grabbing wakeup_lock */
@@ -528,7 +524,6 @@ probe_wakeup(void *ignore, struct task_struct *p)
{
struct trace_array_cpu *data;
int cpu = smp_processor_id();
- unsigned long flags;
long disabled;
unsigned int trace_ctx;
@@ -551,11 +546,12 @@ probe_wakeup(void *ignore, struct task_struct *p)
(!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio)))
return;
- trace_ctx = tracing_gen_ctx_flags();
disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->array_buffer.data, cpu)->disabled);
if (unlikely(disabled != 1))
goto out;
+ trace_ctx = tracing_gen_ctx_flags();
+
/* interrupts should be off from try_to_wake_up */
arch_spin_lock(&wakeup_lock);
@@ -582,8 +578,6 @@ probe_wakeup(void *ignore, struct task_struct *p)
wakeup_task = get_task_struct(p);
- local_save_flags(flags);
-
data = per_cpu_ptr(wakeup_trace->array_buffer.data, wakeup_cpu);
data->preempt_timestamp = ftrace_now(cpu);
tracing_sched_wakeup_trace(wakeup_trace, p, current, trace_ctx);
@@ -1 +1 @@
--rt23
+-rt24