#undef TRACE_SYSTEM
#define TRACE_SYSTEM kmem

#if !defined(_TRACE_KMEM_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_KMEM_H

#include <linux/types.h>
#include <linux/tracepoint.h>
#include <trace/events/gfpflags.h>

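/*
 * kmem_alloc: event class shared by the kmalloc and kmem_cache_alloc
 * tracepoints. Records the allocation call site, the returned object,
 * the requested vs. actually allocated size, and the GFP flags used.
 */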
DECLARE_EVENT_CLASS(kmem_alloc,

	TP_PROTO(unsigned long call_site,
		 const void *ptr,
		 size_t bytes_req,
		 size_t bytes_alloc,
		 gfp_t gfp_flags),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
		__field(	size_t,		bytes_req	)
		__field(	size_t,		bytes_alloc	)
		__field(	gfp_t,		gfp_flags	)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
		__entry->bytes_req	= bytes_req;
		__entry->bytes_alloc	= bytes_alloc;
		__entry->gfp_flags	= gfp_flags;
	),

	TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
		__entry->call_site,
		__entry->ptr,
		__entry->bytes_req,
		__entry->bytes_alloc,
		show_gfp_flags(__entry->gfp_flags))
);

DEFINE_EVENT(kmem_alloc, kmalloc,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
);

DEFINE_EVENT(kmem_alloc, kmem_cache_alloc,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc, gfp_t gfp_flags),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags)
);

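/*
 * kmem_alloc_node: as kmem_alloc, plus the NUMA node the allocation was
 * requested from. Shared by kmalloc_node and kmem_cache_alloc_node.
 */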
DECLARE_EVENT_CLASS(kmem_alloc_node,

	TP_PROTO(unsigned long call_site,
		 const void *ptr,
		 size_t bytes_req,
		 size_t bytes_alloc,
		 gfp_t gfp_flags,
		 int node),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
		__field(	size_t,		bytes_req	)
		__field(	size_t,		bytes_alloc	)
		__field(	gfp_t,		gfp_flags	)
		__field(	int,		node		)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
		__entry->bytes_req	= bytes_req;
		__entry->bytes_alloc	= bytes_alloc;
		__entry->gfp_flags	= gfp_flags;
		__entry->node		= node;
	),

	TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
		__entry->call_site,
		__entry->ptr,
		__entry->bytes_req,
		__entry->bytes_alloc,
		show_gfp_flags(__entry->gfp_flags),
		__entry->node)
);

DEFINE_EVENT(kmem_alloc_node, kmalloc_node,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc,
		 gfp_t gfp_flags, int node),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
);

DEFINE_EVENT(kmem_alloc_node, kmem_cache_alloc_node,

	TP_PROTO(unsigned long call_site, const void *ptr,
		 size_t bytes_req, size_t bytes_alloc,
		 gfp_t gfp_flags, int node),

	TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node)
);

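/*
 * kmem_free: event class shared by the kfree and kmem_cache_free
 * tracepoints. Only the call site and the pointer being freed are logged.
 */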
DECLARE_EVENT_CLASS(kmem_free,

	TP_PROTO(unsigned long call_site, const void *ptr),

	TP_ARGS(call_site, ptr),

	TP_STRUCT__entry(
		__field(	unsigned long,	call_site	)
		__field(	const void *,	ptr		)
	),

	TP_fast_assign(
		__entry->call_site	= call_site;
		__entry->ptr		= ptr;
	),

	TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr)
);

DEFINE_EVENT(kmem_free, kfree,

	TP_PROTO(unsigned long call_site, const void *ptr),

	TP_ARGS(call_site, ptr)
);

DEFINE_EVENT(kmem_free, kmem_cache_free,

	TP_PROTO(unsigned long call_site, const void *ptr),

	TP_ARGS(call_site, ptr)
);

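/*
 * mm_page_free: a page of the given order is returned to the page
 * allocator.
 */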
TRACE_EVENT(mm_page_free,

	TP_PROTO(struct page *page, unsigned int order),

	TP_ARGS(page, order),

	TP_STRUCT__entry(
		__field(	struct page *,	page		)
		__field(	unsigned int,	order		)
	),

	TP_fast_assign(
		__entry->page		= page;
		__entry->order		= order;
	),

	TP_printk("page=%p pfn=%lu order=%d",
			__entry->page,
			page_to_pfn(__entry->page),
			__entry->order)
);

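/*
 * mm_page_free_batched: an order-0 page freed through the batched
 * (per-CPU list) free path; 'cold' hints that the page is cache cold.
 */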
TRACE_EVENT(mm_page_free_batched,

	TP_PROTO(struct page *page, int cold),

	TP_ARGS(page, cold),

	TP_STRUCT__entry(
		__field(	struct page *,	page		)
		__field(	int,		cold		)
	),

	TP_fast_assign(
		__entry->page		= page;
		__entry->cold		= cold;
	),

	TP_printk("page=%p pfn=%lu order=0 cold=%d",
			__entry->page,
			page_to_pfn(__entry->page),
			__entry->cold)
);

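/*
 * mm_page_alloc: a page (or higher-order block) was allocated from the
 * buddy allocator. 'page' may be NULL when the allocation failed, hence
 * the guarded page_to_pfn() in the output.
 */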
TRACE_EVENT(mm_page_alloc,

	TP_PROTO(struct page *page, unsigned int order,
			gfp_t gfp_flags, int migratetype),

	TP_ARGS(page, order, gfp_flags, migratetype),

	TP_STRUCT__entry(
		__field(	struct page *,	page		)
		__field(	unsigned int,	order		)
		__field(	gfp_t,		gfp_flags	)
		__field(	int,		migratetype	)
	),

	TP_fast_assign(
		__entry->page		= page;
		__entry->order		= order;
		__entry->gfp_flags	= gfp_flags;
		__entry->migratetype	= migratetype;
	),

	TP_printk("page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s",
		__entry->page,
		__entry->page ? page_to_pfn(__entry->page) : 0,
		__entry->order,
		__entry->migratetype,
		show_gfp_flags(__entry->gfp_flags))
);

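/*
 * mm_page: event class for the per-CPU page list paths
 * (mm_page_alloc_zone_locked, mm_page_pcpu_drain). An order-0 request
 * taken under the zone lock implies a per-CPU list refill, hence
 * percpu_refill is reported as (order == 0).
 */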
DECLARE_EVENT_CLASS(mm_page,

	TP_PROTO(struct page *page, unsigned int order, int migratetype),

	TP_ARGS(page, order, migratetype),

	TP_STRUCT__entry(
		__field(	struct page *,	page		)
		__field(	unsigned int,	order		)
		__field(	int,		migratetype	)
	),

	TP_fast_assign(
		__entry->page		= page;
		__entry->order		= order;
		__entry->migratetype	= migratetype;
	),

	TP_printk("page=%p pfn=%lu order=%u migratetype=%d percpu_refill=%d",
		__entry->page,
		__entry->page ? page_to_pfn(__entry->page) : 0,
		__entry->order,
		__entry->migratetype,
		__entry->order == 0)
);

DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked,

	TP_PROTO(struct page *page, unsigned int order, int migratetype),

	TP_ARGS(page, order, migratetype)
);

DEFINE_EVENT_PRINT(mm_page, mm_page_pcpu_drain,

	TP_PROTO(struct page *page, unsigned int order, int migratetype),

	TP_ARGS(page, order, migratetype),

	TP_printk("page=%p pfn=%lu order=%d migratetype=%d",
		__entry->page, page_to_pfn(__entry->page),
		__entry->order, __entry->migratetype)
);

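/*
 * mm_page_alloc_extfrag: an allocation fell back to a pageblock of a
 * different migratetype. A fallback_order below pageblock_order means
 * the remaining pageblock may become fragmented; change_ownership
 * reports whether the pageblock's migratetype was switched.
 */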
TRACE_EVENT(mm_page_alloc_extfrag,

	TP_PROTO(struct page *page,
			int alloc_order, int fallback_order,
			int alloc_migratetype, int fallback_migratetype,
			int change_ownership),

	TP_ARGS(page,
		alloc_order, fallback_order,
		alloc_migratetype, fallback_migratetype,
		change_ownership),

	TP_STRUCT__entry(
		__field(	struct page *,	page			)
		__field(	int,		alloc_order		)
		__field(	int,		fallback_order		)
		__field(	int,		alloc_migratetype	)
		__field(	int,		fallback_migratetype	)
		__field(	int,		change_ownership	)
	),

	TP_fast_assign(
		__entry->page			= page;
		__entry->alloc_order		= alloc_order;
		__entry->fallback_order		= fallback_order;
		__entry->alloc_migratetype	= alloc_migratetype;
		__entry->fallback_migratetype	= fallback_migratetype;
		__entry->change_ownership	= change_ownership;
	),

	TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
		__entry->page,
		page_to_pfn(__entry->page),
		__entry->alloc_order,
		__entry->fallback_order,
		pageblock_order,
		__entry->alloc_migratetype,
		__entry->fallback_migratetype,
		__entry->fallback_order < pageblock_order,
		__entry->change_ownership)
);

#endif /* _TRACE_KMEM_H */

/* This part must be outside protection */
#include <trace/define_trace.h>