@@ -1359,7 +1359,8 @@ static void cleanup_rp_inst(struct kretprobe *rp)
struct hlist_node *next;
struct hlist_head *head;

- /* No race here */
+ /* To avoid a recursive kretprobe hit from NMI context, mark the kprobe busy here */
+ kprobe_busy_begin();
for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
kretprobe_table_lock(hash, &flags);
head = &kretprobe_inst_table[hash];
@@ -1369,6 +1370,8 @@ static void cleanup_rp_inst(struct kretprobe *rp)
}
kretprobe_table_unlock(hash, &flags);
}
+ kprobe_busy_end();
+
free_rp_inst(rp);
}
NOKPROBE_SYMBOL(cleanup_rp_inst);
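
For reference, the kprobe_busy_begin()/kprobe_busy_end() pair marks the
current CPU as already executing a kprobe, so any kprobe hit (including a
kretprobe entry) that fires from NMI context while cleanup_rp_inst() holds
a kretprobe table lock is treated as a reentrant hit and its handlers are
not run. A sketch of the helpers, modeled on the upstream kernel/kprobes.c
implementation (details may differ between kernel versions):

	/* Dummy probe used only as a per-CPU "busy" marker. */
	static struct kprobe kprobe_busy = {
		.addr = (void *) get_kprobe,
	};

	void kprobe_busy_begin(void)
	{
		struct kprobe_ctlblk *kcb;

		/* Pin this CPU and pretend a kprobe is being handled. */
		preempt_disable();
		__this_cpu_write(current_kprobe, &kprobe_busy);
		kcb = get_kprobe_ctlblk();
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
	}

	void kprobe_busy_end(void)
	{
		/* Clear the marker and allow preemption again. */
		__this_cpu_write(current_kprobe, NULL);
		preempt_enable();
	}

Setting current_kprobe to the dummy instance is what the arch-level
reentrancy check keys on; no real handler ever runs for kprobe_busy.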
@@ -1937,17 +1940,6 @@ static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
unsigned long hash, flags = 0;
struct kretprobe_instance *ri;

- /*
- * To avoid deadlocks, prohibit return probing in NMI contexts,
- * just skip the probe and increase the (inexact) 'nmissed'
- * statistical counter, so that the user is informed that
- * something happened:
- */
- if (unlikely(in_nmi())) {
- rp->nmissed++;
- return 0;
- }
-
/* TODO: consider to only swap the RA after the last pre_handler fired */
hash = hash_ptr(current, KPROBE_HASH_BITS);
raw_spin_lock_irqsave(&rp->lock, flags);
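
With the busy marker in place, the blanket in_nmi() rejection removed above
is no longer needed: the deadlock it guarded against was a kretprobe firing
in NMI context while cleanup_rp_inst() held a kretprobe table lock, and that
case is now caught by the ordinary per-CPU reentrancy check. Roughly (a
simplified sketch of the check the arch-specific kprobe trap handler
performs, not exact code for any particular architecture):

	if (kprobe_running()) {
		/*
		 * Another kprobe is already active on this CPU -- possibly
		 * only the kprobe_busy marker. Treat this hit as reentrant:
		 * skip the handlers instead of re-taking any kretprobe lock.
		 */
	}

An NMI-time kretprobe hit during cleanup is thus accounted as a missed
probe (much as the removed code did explicitly via rp->nmissed++) rather
than deadlocking on kretprobe_table_lock.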