/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 * Project home: http://compcache.googlecode.com
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include "zram_drv.h"

/* Globals */
static int zram_major;
struct zram *zram_devices;

/* Module params (documentation at end) */
static unsigned int num_devices;

static void zram_stat_inc(u32 *v)
{
	*v = *v + 1;
}

static void zram_stat_dec(u32 *v)
{
	*v = *v - 1;
}

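/*
 * 64-bit counters cannot be updated atomically with plain loads and stores
 * on 32-bit machines, so every u64 stat update goes through stat64_lock.
 */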
static void zram_stat64_add(struct zram *zram, u64 *v, u64 inc)
{
	spin_lock(&zram->stat64_lock);
	*v = *v + inc;
	spin_unlock(&zram->stat64_lock);
}

static void zram_stat64_sub(struct zram *zram, u64 *v, u64 dec)
{
	spin_lock(&zram->stat64_lock);
	*v = *v - dec;
	spin_unlock(&zram->stat64_lock);
}

static void zram_stat64_inc(struct zram *zram, u64 *v)
{
	zram_stat64_add(zram, v, 1);
}

static int zram_test_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	return zram->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags &= ~BIT(flag);
}

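/*
 * Scan the page one word at a time; returns 1 if it contains only zeroes,
 * so the caller can flag it ZRAM_ZERO instead of storing it.
 */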
static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}

static void zram_set_disksize(struct zram *zram, size_t totalram_bytes)
{
	if (!zram->disksize) {
		pr_info(
		"disk size not provided. You can use disksize_kb module "
		"param to specify size.\nUsing default: (%u%% of RAM).\n",
		default_disksize_perc_ram
		);
		zram->disksize = default_disksize_perc_ram *
					(totalram_bytes / 100);
	}

	if (zram->disksize > 2 * (totalram_bytes)) {
		pr_info(
		"There is little point creating a zram of greater than "
		"twice the size of memory since we expect a 2:1 compression "
		"ratio. Note that zram uses about 0.1%% of the size of "
		"the disk when not in use so a huge zram is "
		"wasteful.\n"
		"\tMemory Size: %zu kB\n"
		"\tSize you selected: %llu kB\n"
		"Continuing anyway ...\n",
		totalram_bytes >> 10, zram->disksize
		);
	}

	zram->disksize &= PAGE_MASK;
}

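/*
 * Free the zsmalloc object backing this table entry (if any) and undo the
 * statistics accounted when the page was stored.
 */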
static void zram_free_page(struct zram *zram, size_t index)
{
	unsigned long handle = zram->table[index].handle;
	u16 size = zram->table[index].size;

	if (unlikely(!handle)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(zram, index, ZRAM_ZERO)) {
			zram_clear_flag(zram, index, ZRAM_ZERO);
			zram_stat_dec(&zram->stats.pages_zero);
		}
		return;
	}

	if (unlikely(size > max_zpage_size))
		zram_stat_dec(&zram->stats.bad_compress);

	zs_free(zram->mem_pool, handle);

	if (size <= PAGE_SIZE / 2)
		zram_stat_dec(&zram->stats.good_compress);

	zram_stat64_sub(zram, &zram->stats.compr_size,
			zram->table[index].size);
	zram_stat_dec(&zram->stats.pages_stored);

	zram->table[index].handle = 0;
	zram->table[index].size = 0;
}

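/* Zero-fill the byte range of the bio page covered by this bio_vec. */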
static void handle_zero_page(struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	void *user_mem;

	user_mem = kmap_atomic(page);
	memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
	kunmap_atomic(user_mem);

	flush_dcache_page(page);
}

static inline int is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}

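/*
 * Decompress the page at @index into the bio page. Partial (sub-page) reads
 * decompress into a temporary buffer first and copy out only the requested
 * range.
 */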
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			  u32 index, int offset, struct bio *bio)
{
	int ret;
	size_t clen;
	struct page *page;
	unsigned char *user_mem, *cmem, *uncmem = NULL;

	page = bvec->bv_page;

	if (zram_test_flag(zram, index, ZRAM_ZERO)) {
		handle_zero_page(bvec);
		return 0;
	}

	/* Requested page is not present in compressed area */
	if (unlikely(!zram->table[index].handle)) {
		pr_debug("Read before write: sector=%lu, size=%u",
			 (ulong)(bio->bi_sector), bio->bi_size);
		handle_zero_page(bvec);
		return 0;
	}

	if (is_partial_io(bvec)) {
		/* Use a temporary buffer to decompress the page */
		uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!uncmem) {
			pr_info("Error allocating temp memory!\n");
			return -ENOMEM;
		}
	}

	user_mem = kmap_atomic(page);
	if (!is_partial_io(bvec))
		uncmem = user_mem;
	clen = PAGE_SIZE;

	cmem = zs_map_object(zram->mem_pool, zram->table[index].handle,
				ZS_MM_RO);

	ret = lzo1x_decompress_safe(cmem, zram->table[index].size,
				    uncmem, &clen);

	if (is_partial_io(bvec)) {
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
		       bvec->bv_len);
		kfree(uncmem);
	}

	zs_unmap_object(zram->mem_pool, zram->table[index].handle);
	kunmap_atomic(user_mem);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		zram_stat64_inc(zram, &zram->stats.failed_reads);
		return ret;
	}

	flush_dcache_page(page);

	return 0;
}

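/*
 * Decompress the current contents of @index into @mem so that a partial
 * write can merge its new bytes before the page is recompressed.
 */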
static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
{
	int ret;
	size_t clen = PAGE_SIZE;
	unsigned char *cmem;
	unsigned long handle = zram->table[index].handle;

	if (zram_test_flag(zram, index, ZRAM_ZERO) || !handle) {
		memset(mem, 0, PAGE_SIZE);
		return 0;
	}

	cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
	ret = lzo1x_decompress_safe(cmem, zram->table[index].size,
				    mem, &clen);
	zs_unmap_object(zram->mem_pool, handle);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		zram_stat64_inc(zram, &zram->stats.failed_reads);
		return ret;
	}

	return 0;
}

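/*
 * Compress the (possibly merged) page and store it in the zsmalloc pool.
 * Zero-filled pages are only flagged ZRAM_ZERO and consume no pool memory.
 */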
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
			   int offset)
{
	int ret;
	size_t clen;
	unsigned long handle;
	struct page *page;
	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;

	page = bvec->bv_page;
	src = zram->compress_buffer;

	if (is_partial_io(bvec)) {
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!uncmem) {
			pr_info("Error allocating temp memory!\n");
			ret = -ENOMEM;
			goto out;
		}
		ret = zram_read_before_write(zram, uncmem, index);
		if (ret) {
			kfree(uncmem);
			goto out;
		}
	}

	/*
	 * System overwrites unused sectors. Free memory associated
	 * with this sector now.
	 */
	if (zram->table[index].handle ||
	    zram_test_flag(zram, index, ZRAM_ZERO))
		zram_free_page(zram, index);

	user_mem = kmap_atomic(page);

	if (is_partial_io(bvec))
		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
		       bvec->bv_len);
	else
		uncmem = user_mem;

	if (page_zero_filled(uncmem)) {
		kunmap_atomic(user_mem);
		if (is_partial_io(bvec))
			kfree(uncmem);
		zram_stat_inc(&zram->stats.pages_zero);
		zram_set_flag(zram, index, ZRAM_ZERO);
		ret = 0;
		goto out;
	}

	ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
			       zram->compress_workmem);

	kunmap_atomic(user_mem);
	if (is_partial_io(bvec))
		kfree(uncmem);

	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Compression failed! err=%d\n", ret);
		goto out;
	}

	if (unlikely(clen > max_zpage_size))
		zram_stat_inc(&zram->stats.bad_compress);

	handle = zs_malloc(zram->mem_pool, clen);
	if (!handle) {
		pr_info("Error allocating memory for compressed "
			"page: %u, size=%zu\n", index, clen);
		ret = -ENOMEM;
		goto out;
	}
	cmem = zs_map_object(zram->mem_pool, handle, ZS_MM_WO);

	memcpy(cmem, src, clen);

	zs_unmap_object(zram->mem_pool, handle);

	zram->table[index].handle = handle;
	zram->table[index].size = clen;

	/* Update stats */
	zram_stat64_add(zram, &zram->stats.compr_size, clen);
	zram_stat_inc(&zram->stats.pages_stored);
	if (clen <= PAGE_SIZE / 2)
		zram_stat_inc(&zram->stats.good_compress);

	return 0;

out:
	if (ret)
		zram_stat64_inc(zram, &zram->stats.failed_writes);
	return ret;
}

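/*
 * Reads take zram->lock shared so they can run concurrently; writes take it
 * exclusive since they modify the table entry and the pool.
 */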
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, struct bio *bio, int rw)
{
	int ret;

	if (rw == READ) {
		down_read(&zram->lock);
		ret = zram_bvec_read(zram, bvec, index, offset, bio);
		up_read(&zram->lock);
	} else {
		down_write(&zram->lock);
		ret = zram_bvec_write(zram, bvec, index, offset);
		up_write(&zram->lock);
	}

	return ret;
}

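/* Advance the (page index, offset) position by the length of this bio_vec. */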
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	if (*offset + bvec->bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

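/*
 * Walk the bio segment by segment. A bio_vec that crosses a page boundary
 * is split so that zram_bvec_rw() always works within one zram page.
 */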
static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
{
	int i, offset;
	u32 index;
	struct bio_vec *bvec;

	switch (rw) {
	case READ:
		zram_stat64_inc(zram, &zram->stats.num_reads);
		break;
	case WRITE:
		zram_stat64_inc(zram, &zram->stats.num_writes);
		break;
	}

	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	bio_for_each_segment(bvec, bio, i) {
		int max_transfer_size = PAGE_SIZE - offset;

		if (bvec->bv_len > max_transfer_size) {
			/*
			 * zram_bvec_rw() can only operate on a single
			 * zram page. Split the bio vector.
			 */
			struct bio_vec bv;

			bv.bv_page = bvec->bv_page;
			bv.bv_len = max_transfer_size;
			bv.bv_offset = bvec->bv_offset;

			if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0)
				goto out;

			bv.bv_len = bvec->bv_len - max_transfer_size;
			bv.bv_offset += max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index+1, 0, bio, rw) < 0)
				goto out;
		} else
			if (zram_bvec_rw(zram, bvec, index, offset, bio, rw)
			    < 0)
				goto out;

		update_position(&index, &offset, bvec);
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return;

out:
	bio_io_error(bio);
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
	if (unlikely(
		(bio->bi_sector >= (zram->disksize >> SECTOR_SHIFT)) ||
		(bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)) ||
		(bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))) {

		return 0;
	}

	/* I/O request is valid */
	return 1;
}

/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	if (unlikely(!zram->init_done) && zram_init_device(zram))
		goto error;

	down_read(&zram->init_lock);
	if (unlikely(!zram->init_done))
		goto error_unlock;

	if (!valid_io_request(zram, bio)) {
		zram_stat64_inc(zram, &zram->stats.invalid_io);
		goto error_unlock;
	}

	__zram_make_request(zram, bio, bio_data_dir(bio));
	up_read(&zram->init_lock);

	return;

error_unlock:
	up_read(&zram->init_lock);
error:
	bio_io_error(bio);
}

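/* Tear down all per-device state. Callers must hold init_lock for writing. */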
void __zram_reset_device(struct zram *zram)
{
	size_t index;

	zram->init_done = 0;

	/* Free various per-device buffers */
	kfree(zram->compress_workmem);
	free_pages((unsigned long)zram->compress_buffer, 1);

	zram->compress_workmem = NULL;
	zram->compress_buffer = NULL;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
		unsigned long handle = zram->table[index].handle;
		if (!handle)
			continue;

		zs_free(zram->mem_pool, handle);
	}

	vfree(zram->table);
	zram->table = NULL;

	zs_destroy_pool(zram->mem_pool);
	zram->mem_pool = NULL;

	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));

	zram->disksize = 0;
}

void zram_reset_device(struct zram *zram)
{
	down_write(&zram->init_lock);
	__zram_reset_device(zram);
	up_write(&zram->init_lock);
}

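/*
 * Lazy initialization: allocate the compression workspace, the page table
 * and the zsmalloc pool the first time the device is actually used.
 */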
int zram_init_device(struct zram *zram)
{
	int ret;
	size_t num_pages;

	down_write(&zram->init_lock);

	if (zram->init_done) {
		up_write(&zram->init_lock);
		return 0;
	}

	zram_set_disksize(zram, totalram_pages << PAGE_SHIFT);

	zram->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	if (!zram->compress_workmem) {
		pr_err("Error allocating compressor working memory!\n");
		ret = -ENOMEM;
		goto fail_no_table;
	}

	zram->compress_buffer =
		(void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!zram->compress_buffer) {
		pr_err("Error allocating compressor buffer space\n");
		ret = -ENOMEM;
		goto fail_no_table;
	}

	num_pages = zram->disksize >> PAGE_SHIFT;
	zram->table = vzalloc(num_pages * sizeof(*zram->table));
	if (!zram->table) {
		pr_err("Error allocating zram address table\n");
		ret = -ENOMEM;
		goto fail_no_table;
	}

	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);

	/* zram devices sort of resemble non-rotational disks */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);

	zram->mem_pool = zs_create_pool("zram", GFP_NOIO | __GFP_HIGHMEM);
	if (!zram->mem_pool) {
		pr_err("Error creating memory pool\n");
		ret = -ENOMEM;
		goto fail;
	}

	zram->init_done = 1;
	up_write(&zram->init_lock);

	pr_debug("Initialization done!\n");
	return 0;

fail_no_table:
	/* To prevent accessing table entries during cleanup */
	zram->disksize = 0;
fail:
	__zram_reset_device(zram);
	up_write(&zram->init_lock);
	pr_err("Initialization failed: err=%d\n", ret);
	return ret;
}

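/*
 * Called when a swap slot on this device is freed; drop the stored page so
 * the memory is reclaimed immediately instead of waiting for an overwrite.
 */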
static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram;

	zram = bdev->bd_disk->private_data;
	zram_free_page(zram, index);
	zram_stat64_inc(zram, &zram->stats.notify_free);
}

static const struct block_device_operations zram_devops = {
	.swap_slot_free_notify = zram_slot_free_notify,
	.owner = THIS_MODULE
};

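/*
 * Allocate the request queue and gendisk for one zram device and register
 * its sysfs attribute group.
 */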
static int create_device(struct zram *zram, int device_id)
{
	int ret = 0;

	init_rwsem(&zram->lock);
	init_rwsem(&zram->init_lock);
	spin_lock_init(&zram->stat64_lock);

	zram->queue = blk_alloc_queue(GFP_KERNEL);
	if (!zram->queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out;
	}

	blk_queue_make_request(zram->queue, zram_make_request);
	zram->queue->queuedata = zram;

	 /* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		blk_cleanup_queue(zram->queue);
		pr_warn("Error allocating disk structure for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = zram->queue;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);

	/*
	 * To ensure that we always get PAGE_SIZE aligned
	 * and n*PAGE_SIZE sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue,
					ZRAM_LOGICAL_BLOCK_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);

	add_disk(zram->disk);

	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
				&zram_disk_attr_group);
	if (ret < 0) {
		pr_warn("Error creating sysfs group\n");
		goto out;
	}

	zram->init_done = 0;

out:
	return ret;
}

static void destroy_device(struct zram *zram)
{
	sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
			&zram_disk_attr_group);

	if (zram->disk) {
		del_gendisk(zram->disk);
		put_disk(zram->disk);
	}

	if (zram->queue)
		blk_cleanup_queue(zram->queue);
}

unsigned int zram_get_num_devices(void)
{
	return num_devices;
}

static int __init zram_init(void)
{
	int ret, dev_id;

	if (num_devices > max_num_devices) {
		pr_warn("Invalid value for num_devices: %u\n",
				num_devices);
		ret = -EINVAL;
		goto out;
	}

	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_warn("Unable to get major number\n");
		ret = -EBUSY;
		goto out;
	}

	if (!num_devices) {
		pr_info("num_devices not specified. Using default: 1\n");
		num_devices = 1;
	}

	/* Allocate the device array and initialize each one */
	pr_info("Creating %u devices ...\n", num_devices);
	zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
	if (!zram_devices) {
		ret = -ENOMEM;
		goto unregister;
	}

	for (dev_id = 0; dev_id < num_devices; dev_id++) {
		ret = create_device(&zram_devices[dev_id], dev_id);
		if (ret)
			goto free_devices;
	}

	return 0;

free_devices:
	while (dev_id)
		destroy_device(&zram_devices[--dev_id]);
	kfree(zram_devices);
unregister:
	unregister_blkdev(zram_major, "zram");
out:
	return ret;
}

static void __exit zram_exit(void)
{
	int i;
	struct zram *zram;

	for (i = 0; i < num_devices; i++) {
		zram = &zram_devices[i];

		destroy_device(zram);
		if (zram->init_done)
			zram_reset_device(zram);
	}

	unregister_blkdev(zram_major, "zram");

	kfree(zram_devices);
	pr_debug("Cleanup done!\n");
}

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");

module_init(zram_init);
module_exit(zram_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");