@@ -1790,6 +1790,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
noirqdebug [X86-32] Disables the code which attempts to detect and
disable unhandled interrupt sources.
+ noirqtime [X86,ARM] Disables IRQ_TIME_ACCOUNTING at run time and
+ eliminates the timestamping on irq/softirq entry/exit.
+
no_timer_check [X86,APIC] Disables the code which tests for
broken timer IRQ sources.
@@ -2636,10 +2639,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
as the stability checks done at bootup. Used to enable
high-resolution timer mode on older hardware, and in
virtualized environment.
- [x86] noirqtime: Do not use TSC to do irq accounting.
- Used to run time disable IRQ_TIME_ACCOUNTING on any
- platforms where RDTSC is slow and this accounting
- can add overhead.
+ [x86] noirqtime: obsoleted by the generic "noirqtime" option;
+ see its documentation for details.
turbografx.map[2|3]= [HW,JOY]
TurboGraFX parallel port interface
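(Example: on a kernel built with IRQ_TIME_ACCOUNTING, appending the new
generic switch to an otherwise ordinary command line disables the
accounting at boot; the other parameters here are illustrative only.)

	console=ttyS0 root=/dev/sda1 noirqtime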
@@ -144,6 +144,8 @@ void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
*/
cd.epoch_ns = 0;
+ enable_sched_clock_irqtime();
+
pr_debug("Registered %pF as sched_clock source\n", read);
}
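A minimal sketch of what a platform gains from this hook, assuming a
hypothetical memory-mapped free-running counter (the myplat_* names and
the 24 MHz rate are invented for illustration): registering the clock now
also opts the platform in to IRQ time accounting, unless "noirqtime" was
passed on the command line.

	static void __iomem *myplat_counter;	/* mapped earlier by platform code (assumed) */

	static u32 notrace myplat_read_sched_clock(void)
	{
		return readl_relaxed(myplat_counter);	/* free-running 32-bit counter */
	}

	void __init myplat_timer_init(void)
	{
		/* setup_sched_clock() now calls enable_sched_clock_irqtime(),
		 * so no extra per-platform call is needed */
		setup_sched_clock(myplat_read_sched_clock, 32, 24000000);
	}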
@@ -805,17 +805,6 @@ config SCHED_MC
making when dealing with multi-core CPU chips at a cost of slightly
increased overhead in some places. If unsure say N here.
-config IRQ_TIME_ACCOUNTING
- bool "Fine granularity task level IRQ time accounting"
- default n
- ---help---
- Select this option to enable fine granularity task irq time
- accounting. This is done by reading a timestamp on each
- transitions between softirq and hardirq state, so there can be a
- small performance impact.
-
- If in doubt, say N here.
-
source "kernel/Kconfig.preempt"
config X86_UP_APIC
@@ -103,14 +103,14 @@ int __init notsc_setup(char *str)
__setup("notsc", notsc_setup);
-static int no_sched_irq_time;
-
static int __init tsc_setup(char *str)
{
if (!strcmp(str, "reliable"))
tsc_clocksource_reliable = 1;
- if (!strncmp(str, "noirqtime", 9))
- no_sched_irq_time = 1;
+ if (!strncmp(str, "noirqtime", 9)) {
+ printk(KERN_WARNING "tsc: tsc=noirqtime is obsolete, use noirqtime instead\n");
+ disable_sched_clock_irqtime();
+ }
return 1;
}
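With this change the legacy spelling keeps working but is funneled
through the generic path: booting with tsc=noirqtime disables the
accounting and emits the warning added above, i.e. a dmesg line of the
form:

	tsc: tsc=noirqtime is obsolete, use noirqtime instead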
@@ -978,8 +979,7 @@ void __init tsc_init(void)
/* now allow native_sched_clock() to use rdtsc */
tsc_disabled = 0;
- if (!no_sched_irq_time)
- enable_sched_clock_irqtime();
+ enable_sched_clock_irqtime();
lpj = ((u64)tsc_khz * 1000);
do_div(lpj, HZ);
@@ -1961,11 +1961,7 @@ extern void sched_clock_idle_wakeup_event(u64 delta_ns);
#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
-/*
- * An i/f to runtime opt-in for irq time accounting based off of sched_clock.
- * The reason for this explicit opt-in is not to have perf penalty with
- * slow sched_clocks.
- */
+extern int sched_clock_irqtime;
extern void enable_sched_clock_irqtime(void);
extern void disable_sched_clock_irqtime(void);
#else
@@ -757,11 +757,17 @@ static DEFINE_PER_CPU(u64, cpu_hardirq_time);
static DEFINE_PER_CPU(u64, cpu_softirq_time);
static DEFINE_PER_CPU(u64, irq_start_time);
-static int sched_clock_irqtime;
+
+/*
+ * -1 if not initialized, 0 if disabled with the "noirqtime" kernel
+ * option or after an unstable clock was detected, 1 if enabled and active.
+ */
+__read_mostly int sched_clock_irqtime = -1;
void enable_sched_clock_irqtime(void)
{
- sched_clock_irqtime = 1;
+ if (sched_clock_irqtime == -1)
+ sched_clock_irqtime = 1;
}
void disable_sched_clock_irqtime(void)
@@ -769,6 +775,14 @@ void disable_sched_clock_irqtime(void)
sched_clock_irqtime = 0;
}
+static int __init irqtime_setup(char *str)
+{
+ sched_clock_irqtime = 0;
+ return 1;
+}
+
+__setup("noirqtime", irqtime_setup);
+
#ifndef CONFIG_64BIT
static DEFINE_PER_CPU(seqcount_t, irq_time_seq);
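The resulting state machine is small enough to check in isolation; a
throwaway userspace model (plain C, not kernel code) of the three states
and the sticky disable:

	#include <assert.h>
	#include <stdio.h>

	static int sched_clock_irqtime = -1;	/* -1 unset, 0 disabled, 1 active */

	static void enable_sched_clock_irqtime(void)
	{
		if (sched_clock_irqtime == -1)	/* only arm from the unset state */
			sched_clock_irqtime = 1;
	}

	static void disable_sched_clock_irqtime(void)
	{
		sched_clock_irqtime = 0;	/* sticky: a later enable is a no-op */
	}

	int main(void)
	{
		/* "noirqtime" is parsed before tsc_init()/setup_sched_clock()
		 * run, so the later enable must not undo the user's choice */
		disable_sched_clock_irqtime();
		enable_sched_clock_irqtime();
		assert(sched_clock_irqtime == 0);

		/* default boot: the first enable flips -1 to 1 */
		sched_clock_irqtime = -1;
		enable_sched_clock_irqtime();
		assert(sched_clock_irqtime == 1);

		puts("ok");
		return 0;
	}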
@@ -822,7 +836,7 @@ void account_system_vtime(struct task_struct *curr)
s64 delta;
int cpu;
- if (!sched_clock_irqtime)
+ if (sched_clock_irqtime < 1)
return;
local_irq_save(flags);
@@ -2852,7 +2866,7 @@ void account_process_tick(struct task_struct *p, int user_tick)
cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
struct rq *rq = this_rq();
- if (sched_clock_irqtime) {
+ if (sched_clock_irqtime > 0) {
irqtime_account_process_tick(p, user_tick, rq);
return;
}
@@ -2886,7 +2900,7 @@ void account_steal_ticks(unsigned long ticks)
void account_idle_ticks(unsigned long ticks)
{
- if (sched_clock_irqtime) {
+ if (sched_clock_irqtime > 0) {
irqtime_account_idle_ticks(ticks);
return;
}
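The accounting paths above switch from a plain truth test because -1 now
also means "inactive"; a hypothetical helper (not part of this patch)
states the intended predicate:

	static inline bool irqtime_enabled(void)
	{
		/* 0 (disabled) and -1 (no clock registered yet) are both inactive */
		return sched_clock_irqtime > 0;
	}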
@@ -299,6 +299,18 @@ config SCHEDSTATS
application, you can say N to avoid the very slight overhead
this adds.
+config IRQ_TIME_ACCOUNTING
+ bool "Fine granularity task level IRQ time accounting"
+ depends on X86 || ARM
+ default n
+ ---help---
+ Select this option to enable fine granularity task irq time
+ accounting. This is done by reading a timestamp on each
+ transition between softirq and hardirq state, so there can be a
+ small performance impact.
+
+ If in doubt, say N here.
+
config TIMER_STATS
bool "Collect kernel timers statistics"
depends on DEBUG_KERNEL && PROC_FS
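To build with the feature available at all, the usual config fragment
applies; the boot-time "noirqtime" switch then only controls whether it
is active:

	CONFIG_IRQ_TIME_ACCOUNTING=y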