/*
 * Copyright (C) 2013 Fusion IO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sizes.h>
#include "btrfs-tests.h"
#include "../ctree.h"
#include "../extent_io.h"

/* Flags for process_page_range() */
#define PROCESS_UNLOCK		(1 << 0)	/* unlock each locked page */
#define PROCESS_RELEASE		(1 << 1)	/* drop the extra page reference */
#define PROCESS_TEST_LOCKED	(1 << 2)	/* count pages found unlocked */

static noinline int process_page_range(struct inode *inode, u64 start, u64 end,
				       unsigned long flags)
{
	int ret;
	struct page *pages[16];
	unsigned long index = start >> PAGE_SHIFT;
	unsigned long end_index = end >> PAGE_SHIFT;
	unsigned long nr_pages = end_index - index + 1;
	int i;
	int count = 0;
	int loops = 0;

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long, nr_pages,
				     ARRAY_SIZE(pages)), pages);
		for (i = 0; i < ret; i++) {
			if (flags & PROCESS_TEST_LOCKED &&
			    !PageLocked(pages[i]))
				count++;
			if (flags & PROCESS_UNLOCK && PageLocked(pages[i]))
				unlock_page(pages[i]);
			put_page(pages[i]);
			if (flags & PROCESS_RELEASE)
				put_page(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
		cond_resched();
		loops++;
		if (loops > 100000) {
			printk(KERN_ERR "stuck in a loop, start %Lu, end %Lu, nr_pages %lu, ret %d\n", start, end, nr_pages, ret);
			break;
		}
	}
	return count;
}

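/*
 * Exercise find_lock_delalloc_range(): dirty 256M worth of pages against a
 * private extent io tree, mark various delalloc ranges and verify that the
 * returned start/end offsets and the page locking match the expectations.
 */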
static int test_find_delalloc(u32 sectorsize)
{
	struct inode *inode;
	struct extent_io_tree tmp;
	struct page *page;
	struct page *locked_page = NULL;
	unsigned long index = 0;
	u64 total_dirty = SZ_256M;
	u64 max_bytes = SZ_128M;
	u64 start, end, test_start;
	u64 found;
	int ret = -EINVAL;

	test_msg("Running find delalloc tests\n");

	inode = btrfs_new_test_inode();
	if (!inode) {
		test_msg("Failed to allocate test inode\n");
		return -ENOMEM;
	}

	extent_io_tree_init(&tmp, &inode->i_data);

	/*
	 * First go through and create and mark all of our pages dirty, we pin
	 * everything to make sure our pages don't get evicted and screw up our
	 * test.
	 */
	for (index = 0; index < (total_dirty >> PAGE_SHIFT); index++) {
		page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL);
		if (!page) {
			test_msg("Failed to allocate test page\n");
			ret = -ENOMEM;
			goto out;
		}
		SetPageDirty(page);
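		/*
		 * Keep the very first page locked and take an extra
		 * reference so it can be passed to
		 * find_lock_delalloc_range() as the already locked page.
		 */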
		if (index) {
			unlock_page(page);
		} else {
			get_page(page);
			locked_page = page;
		}
	}

	/* Test this scenario
	 * |--- delalloc ---|
	 * |---  search  ---|
	 */
	set_extent_delalloc(&tmp, 0, sectorsize - 1, NULL);
	start = 0;
	end = 0;
	found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
					 &end, max_bytes);
	if (!found) {
		test_msg("Should have found at least one delalloc\n");
		goto out_bits;
	}
	if (start != 0 || end != (sectorsize - 1)) {
		test_msg("Expected start 0 end %u, got start %llu end %llu\n",
			sectorsize - 1, start, end);
		goto out_bits;
	}
	unlock_extent(&tmp, start, end);
	unlock_page(locked_page);
	put_page(locked_page);

	/*
	 * Test this scenario
	 *
	 * |--- delalloc ---|
	 *           |--- search ---|
	 *
	 * find_lock_delalloc_range() clamps the returned range to start at
	 * the locked page, since delalloc before that page cannot be
	 * processed, so we expect start == test_start here rather than the
	 * start of the delalloc extent.
	 */
	test_start = SZ_64M;
	locked_page = find_lock_page(inode->i_mapping,
				     test_start >> PAGE_SHIFT);
	if (!locked_page) {
		test_msg("Couldn't find the locked page\n");
		goto out_bits;
	}
	set_extent_delalloc(&tmp, sectorsize, max_bytes - 1, NULL);
	start = test_start;
	end = 0;
	found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
					 &end, max_bytes);
	if (!found) {
		test_msg("Couldn't find delalloc in our range\n");
		goto out_bits;
	}
	if (start != test_start || end != max_bytes - 1) {
		test_msg("Expected start %Lu end %Lu, got start %Lu, end "
			 "%Lu\n", test_start, max_bytes - 1, start, end);
		goto out_bits;
	}
	if (process_page_range(inode, start, end,
			       PROCESS_TEST_LOCKED | PROCESS_UNLOCK)) {
		test_msg("There were unlocked pages in the range\n");
		goto out_bits;
	}
	unlock_extent(&tmp, start, end);
	/* locked_page was unlocked above */
	put_page(locked_page);

	/*
	 * Test this scenario
	 * |--- delalloc ---|
	 *                    |--- search ---|
	 */
	test_start = max_bytes + sectorsize;
	locked_page = find_lock_page(inode->i_mapping, test_start >>
				     PAGE_SHIFT);
	if (!locked_page) {
		test_msg("Couldn't find the locked page\n");
		goto out_bits;
	}
	start = test_start;
	end = 0;
	found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
					 &end, max_bytes);
	if (found) {
		test_msg("Found range when we shouldn't have\n");
		goto out_bits;
	}
	if (end != (u64)-1) {
		test_msg("Did not return the proper end offset\n");
		goto out_bits;
	}

	/*
	 * Test this scenario
	 * [------- delalloc -------|
	 * [max_bytes]|-- search--|
	 *
	 * We are re-using our test_start from above since it works out well.
	 */
	set_extent_delalloc(&tmp, max_bytes, total_dirty - 1, NULL);
	start = test_start;
	end = 0;
	found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
					 &end, max_bytes);
	if (!found) {
		test_msg("Didn't find our range\n");
		goto out_bits;
	}
	if (start != test_start || end != total_dirty - 1) {
		test_msg("Expected start %Lu end %Lu, got start %Lu end %Lu\n",
			 test_start, total_dirty - 1, start, end);
		goto out_bits;
	}
	if (process_page_range(inode, start, end,
			       PROCESS_TEST_LOCKED | PROCESS_UNLOCK)) {
		test_msg("Pages in range were not all locked\n");
		goto out_bits;
	}
	unlock_extent(&tmp, start, end);

	/*
	 * Now to test where we run into a page that is no longer dirty in the
	 * range we want to find.
	 */
	page = find_get_page(inode->i_mapping,
			     (max_bytes + SZ_1M) >> PAGE_SHIFT);
	if (!page) {
		test_msg("Couldn't find our page\n");
		goto out_bits;
	}
	ClearPageDirty(page);
	put_page(page);

	/* We unlocked it in the previous test */
	lock_page(locked_page);
	start = test_start;
	end = 0;
	/*
	 * Currently if we fail to find dirty pages in the delalloc range we
	 * will adjust max_bytes down to PAGE_SIZE and then re-search.  If
	 * this changes at any point in the future we will need to fix this
	 * test's expected behavior.
	 */
	found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
					 &end, max_bytes);
	if (!found) {
		test_msg("Didn't find our range\n");
		goto out_bits;
	}
	if (start != test_start || end != test_start + PAGE_SIZE - 1) {
		test_msg("Expected start %Lu end %Lu, got start %Lu end %Lu\n",
			 test_start, test_start + PAGE_SIZE - 1, start,
			 end);
		goto out_bits;
	}
	if (process_page_range(inode, start, end, PROCESS_TEST_LOCKED |
			       PROCESS_UNLOCK)) {
		test_msg("Pages in range were not all locked\n");
		goto out_bits;
	}
	ret = 0;
out_bits:
	clear_extent_bits(&tmp, 0, total_dirty - 1, (unsigned)-1);
out:
	if (locked_page)
		put_page(locked_page);
	process_page_range(inode, 0, total_dirty - 1,
			   PROCESS_UNLOCK | PROCESS_RELEASE);
	iput(inode);
	return ret;
}

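/*
 * Mirror every bitmap operation on a plain memory bitmap and on the extent
 * buffer, comparing the two after each step, so the extent buffer bitmap
 * helpers are checked against the generic bitmap API.
 */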
static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb,
			     unsigned long len)
{
	unsigned long i, x;

	memset(bitmap, 0, len);
	memset_extent_buffer(eb, 0, 0, len);
	if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
		test_msg("Bitmap was not zeroed\n");
		return -EINVAL;
	}

	bitmap_set(bitmap, 0, len * BITS_PER_BYTE);
	extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE);
	if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
		test_msg("Setting all bits failed\n");
		return -EINVAL;
	}

	bitmap_clear(bitmap, 0, len * BITS_PER_BYTE);
	extent_buffer_bitmap_clear(eb, 0, 0, len * BITS_PER_BYTE);
	if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
		test_msg("Clearing all bits failed\n");
		return -EINVAL;
	}

	/* Straddling pages test: one long's bits cross a page boundary */
	if (len > PAGE_SIZE) {
		bitmap_set(bitmap,
			(PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
			sizeof(long) * BITS_PER_BYTE);
		extent_buffer_bitmap_set(eb, PAGE_SIZE - sizeof(long) / 2, 0,
					sizeof(long) * BITS_PER_BYTE);
		if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
			test_msg("Setting straddling pages failed\n");
			return -EINVAL;
		}

		bitmap_set(bitmap, 0, len * BITS_PER_BYTE);
		bitmap_clear(bitmap,
			(PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE,
			sizeof(long) * BITS_PER_BYTE);
		extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE);
		extent_buffer_bitmap_clear(eb, PAGE_SIZE - sizeof(long) / 2, 0,
					sizeof(long) * BITS_PER_BYTE);
		if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) {
			test_msg("Clearing straddling pages failed\n");
			return -EINVAL;
		}
	}

	/*
	 * Generate a wonky pseudo-random bit pattern for the sake of not using
	 * something repetitive that could miss some hypothetical off-by-n bug.
	 */
	x = 0;
	for (i = 0; i < len / sizeof(long); i++) {
		x = (0x19660dULL * (u64)x + 0x3c6ef35fULL) & 0xffffffffUL;
		bitmap[i] = x;
	}
	write_extent_buffer(eb, bitmap, 0, len);

	for (i = 0; i < len * BITS_PER_BYTE; i++) {
		int bit, bit1;

		bit = !!test_bit(i, bitmap);
		bit1 = !!extent_buffer_test_bit(eb, 0, i);
		if (bit1 != bit) {
			test_msg("Testing bit pattern failed\n");
			return -EINVAL;
		}

		/* Same bit, addressed as a byte offset plus a bit offset */
		bit1 = !!extent_buffer_test_bit(eb, i / BITS_PER_BYTE,
						i % BITS_PER_BYTE);
		if (bit1 != bit) {
			test_msg("Testing bit pattern with offset failed\n");
			return -EINVAL;
		}
	}

	return 0;
}

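/*
 * Run __test_eb_bitmaps() against both a page-aligned and an unaligned
 * dummy extent buffer.
 */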
static int test_eb_bitmaps(u32 sectorsize, u32 nodesize)
{
	unsigned long len;
	unsigned long *bitmap;
	struct extent_buffer *eb;
	int ret;

	test_msg("Running extent buffer bitmap tests\n");

	/*
	 * On ppc64 the sectorsize can be 64K, so 4 * 64K would be larger
	 * than BTRFS_MAX_METADATA_BLOCKSIZE.
	 */
	len = (sectorsize < BTRFS_MAX_METADATA_BLOCKSIZE)
		? sectorsize * 4 : sectorsize;

	bitmap = kmalloc(len, GFP_KERNEL);
	if (!bitmap) {
		test_msg("Couldn't allocate test bitmap\n");
		return -ENOMEM;
	}

	eb = __alloc_dummy_extent_buffer(NULL, 0, len);
	if (!eb) {
		test_msg("Couldn't allocate test extent buffer\n");
		kfree(bitmap);
		return -ENOMEM;
	}

	ret = __test_eb_bitmaps(bitmap, eb, len);
	if (ret)
		goto out;

	/* Do it over again with an extent buffer which isn't page-aligned. */
	free_extent_buffer(eb);
	eb = __alloc_dummy_extent_buffer(NULL, nodesize / 2, len);
	if (!eb) {
		test_msg("Couldn't allocate test extent buffer\n");
		kfree(bitmap);
		return -ENOMEM;
	}

	ret = __test_eb_bitmaps(bitmap, eb, len);
out:
	free_extent_buffer(eb);
	kfree(bitmap);
	return ret;
}

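/*
 * Entry point for the extent I/O self-tests: run the delalloc test and the
 * extent buffer bitmap tests in turn.
 */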
int btrfs_test_extent_io(u32 sectorsize, u32 nodesize)
{
	int ret;

	test_msg("Running extent I/O tests\n");

	ret = test_find_delalloc(sectorsize);
	if (ret)
		goto out;

	ret = test_eb_bitmaps(sectorsize, nodesize);
out:
	test_msg("Extent I/O tests finished\n");
	return ret;
}