@@ -3311,13 +3311,10 @@ EXPORT_SYMBOL_GPL(perf_event_release_kernel);
 /*
  * Called when the last reference to the file is gone.
  */
-static void put_event(struct perf_event *event)
+void __put_event(struct perf_event *event)
 {
 	struct task_struct *owner;
 
-	if (!atomic_long_dec_and_test(&event->refcount))
-		return;
-
 	rcu_read_lock();
 	owner = ACCESS_ONCE(event->owner);
 	/*
@@ -3884,7 +3881,7 @@ static void ring_buffer_detach_all(struct ring_buffer *rb)
 again:
 	rcu_read_lock();
 	list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
-		if (!atomic_long_inc_not_zero(&event->refcount)) {
+		if (!try_get_event(event)) {
 			/*
 			 * This event is en-route to free_event() which will
 			 * detach it and remove it from the list.
@@ -7606,7 +7603,7 @@ inherit_event(struct perf_event *parent_event,
 	if (IS_ERR(child_event))
 		return child_event;
 
-	if (!atomic_long_inc_not_zero(&parent_event->refcount)) {
+	if (!try_get_event(parent_event)) {
 		free_event(child_event);
 		return NULL;
 	}
@@ -195,4 +195,24 @@ static inline bool arch_perf_have_user_stack_dump(void)
 #define perf_user_stack_pointer(regs) 0
 #endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */
 
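+/*
+ * Grab a reference on the event; fails (returns false) once the refcount
+ * has dropped to zero and the event is on its way to free_event().
+ */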
+static inline bool try_get_event(struct perf_event *event)
+{
+	return atomic_long_inc_not_zero(&event->refcount) != 0;
+}
+extern void __put_event(struct perf_event *event);
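+/*
+ * Drop a reference on the event; the final put hands the event off to
+ * __put_event() for the actual teardown.
+ */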
+static inline void put_event(struct perf_event *event)
+{
+	if (!atomic_long_dec_and_test(&event->refcount))
+		return;
+	__put_event(event);
+}
+
 #endif /* _KERNEL_EVENTS_INTERNAL_H */