@@ -75,15 +75,18 @@ TRACE_EVENT(percpu_free_percpu,
TRACE_EVENT(percpu_alloc_percpu_fail,
- TP_PROTO(bool reserved, bool is_atomic, size_t size, size_t align),
+ TP_PROTO(bool reserved, bool is_atomic, size_t size, size_t align,
+ bool do_warn, int warn_limit),
- TP_ARGS(reserved, is_atomic, size, align),
+ TP_ARGS(reserved, is_atomic, size, align, do_warn, warn_limit),
TP_STRUCT__entry(
__field( bool, reserved )
__field( bool, is_atomic )
__field( size_t, size )
__field( size_t, align )
+ __field( bool, do_warn )
+ __field( int, warn_limit )
),
TP_fast_assign(
@@ -91,11 +94,14 @@ TRACE_EVENT(percpu_alloc_percpu_fail,
__entry->is_atomic = is_atomic;
__entry->size = size;
__entry->align = align;
+ __entry->do_warn = do_warn;
+ __entry->warn_limit = warn_limit;
),
- TP_printk("reserved=%d is_atomic=%d size=%zu align=%zu",
+ TP_printk("reserved=%d is_atomic=%d size=%zu align=%zu do_warn=%d warn_limit=%d",
__entry->reserved, __entry->is_atomic,
- __entry->size, __entry->align)
+ __entry->size, __entry->align,
+ __entry->do_warn, __entry->warn_limit)
);
TRACE_EVENT(percpu_create_chunk,
@@ -1886,7 +1886,7 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
fail_unlock:
spin_unlock_irqrestore(&pcpu_lock, flags);
fail:
- trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align);
+ trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align, do_warn, warn_limit);
if (do_warn && warn_limit) {
pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",