/*
 * Algorithm testing framework and tests.
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2002 Jean-Francois Dive <jef@linuxbe.org>
 * Copyright (c) 2007 Nokia Siemens Networks
 * Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Updated RFC4106 AES-GCM testing.
 *    Authors: Aidan O'Mahony (aidan.o.mahony@intel.com)
 *             Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Tadeusz Struk (tadeusz.struk@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/aead.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/fips.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <crypto/rng.h>
#include <crypto/drbg.h>
#include <crypto/akcipher.h>

#include "internal.h"

#ifdef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS

/* a perfect nop */
int alg_test(const char *driver, const char *alg, u32 type, u32 mask)
{
	return 0;
}

#else

#include "testmgr.h"

/*
 * Need slab memory for testing (size in number of pages).
 */
#define XBUFSIZE	8

/*
 * Indexes into the xbuf to simulate cross-page access.
 */
#define IDX1		32
#define IDX2		32400
#define IDX3		1
#define IDX4		8193
#define IDX5		22222
#define IDX6		17101
#define IDX7		27333
#define IDX8		3000

/*
 * Used by test_cipher()
 */
#define ENCRYPT 1
#define DECRYPT 0

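/* Completion tracking for asynchronous crypto requests. */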
struct tcrypt_result {
	struct completion completion;
	int err;
};

struct aead_test_suite {
	struct {
		struct aead_testvec *vecs;
		unsigned int count;
	} enc, dec;
};

struct cipher_test_suite {
	struct {
		struct cipher_testvec *vecs;
		unsigned int count;
	} enc, dec;
};

struct comp_test_suite {
	struct {
		struct comp_testvec *vecs;
		unsigned int count;
	} comp, decomp;
};

struct pcomp_test_suite {
	struct {
		struct pcomp_testvec *vecs;
		unsigned int count;
	} comp, decomp;
};

struct hash_test_suite {
	struct hash_testvec *vecs;
	unsigned int count;
};

struct cprng_test_suite {
	struct cprng_testvec *vecs;
	unsigned int count;
};

struct drbg_test_suite {
	struct drbg_testvec *vecs;
	unsigned int count;
};

struct akcipher_test_suite {
	struct akcipher_testvec *vecs;
	unsigned int count;
};

struct alg_test_desc {
	const char *alg;
	int (*test)(const struct alg_test_desc *desc, const char *driver,
		    u32 type, u32 mask);
	int fips_allowed;	/* set if alg is allowed in fips mode */

	union {
		struct aead_test_suite aead;
		struct cipher_test_suite cipher;
		struct comp_test_suite comp;
		struct pcomp_test_suite pcomp;
		struct hash_test_suite hash;
		struct cprng_test_suite cprng;
		struct drbg_test_suite drbg;
		struct akcipher_test_suite akcipher;
	} suite;
};

static unsigned int IDX[8] = { IDX1, IDX2, IDX3, IDX4, IDX5, IDX6, IDX7, IDX8 };

static void hexdump(unsigned char *buf, unsigned int len)
{
	print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET,
			16, 1,
			buf, len, false);
}

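/* Async completion callback: record the final status and wake the waiter. */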
static void tcrypt_complete(struct crypto_async_request *req, int err)
{
	struct tcrypt_result *res = req->data;

	if (err == -EINPROGRESS)
		return;

	res->err = err;
	complete(&res->completion);
}

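/* Allocate the XBUFSIZE test pages; free any partial allocation on failure. */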
static int testmgr_alloc_buf(char *buf[XBUFSIZE])
{
	int i;

	for (i = 0; i < XBUFSIZE; i++) {
		buf[i] = (void *)__get_free_page(GFP_KERNEL);
		if (!buf[i])
			goto err_free_buf;
	}

	return 0;

err_free_buf:
	while (i-- > 0)
		free_page((unsigned long)buf[i]);

	return -ENOMEM;
}

static void testmgr_free_buf(char *buf[XBUFSIZE])
{
	int i;

	for (i = 0; i < XBUFSIZE; i++)
		free_page((unsigned long)buf[i]);
}

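/*
 * Collapse an asynchronous return code into a synchronous one: on
 * -EINPROGRESS or -EBUSY, wait for completion and return the final status.
 */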
static int wait_async_op(struct tcrypt_result *tr, int ret)
{
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&tr->completion);
		reinit_completion(&tr->completion);
		ret = tr->err;
	}
	return ret;
}

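/*
 * Run the template vectors through the ahash API, either as one-shot
 * digest() calls or as init()/update()/final() sequences, then repeat
 * the vectors that request it as scatter-gather "chunking" tests;
 * align_offset shifts the input to exercise unaligned buffers.
 */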
static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
		       unsigned int tcount, bool use_digest,
		       const int align_offset)
{
	const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
	unsigned int i, j, k, temp;
	struct scatterlist sg[8];
	char *result;
	char *key;
	struct ahash_request *req;
	struct tcrypt_result tresult;
	void *hash_buff;
	char *xbuf[XBUFSIZE];
	int ret = -ENOMEM;

	result = kmalloc(MAX_DIGEST_SIZE, GFP_KERNEL);
	if (!result)
		return ret;
	key = kmalloc(MAX_KEYLEN, GFP_KERNEL);
	if (!key)
		goto out_nobuf;
	if (testmgr_alloc_buf(xbuf))
		goto out_nobuf;

	init_completion(&tresult.completion);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		printk(KERN_ERR "alg: hash: Failed to allocate request for "
		       "%s\n", algo);
		goto out_noreq;
	}
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   tcrypt_complete, &tresult);

	j = 0;
	for (i = 0; i < tcount; i++) {
		if (template[i].np)
			continue;

		ret = -EINVAL;
		if (WARN_ON(align_offset + template[i].psize > PAGE_SIZE))
			goto out;

		j++;
		memset(result, 0, MAX_DIGEST_SIZE);

		hash_buff = xbuf[0];
		hash_buff += align_offset;

		memcpy(hash_buff, template[i].plaintext, template[i].psize);
		sg_init_one(&sg[0], hash_buff, template[i].psize);

		if (template[i].ksize) {
			crypto_ahash_clear_flags(tfm, ~0);
			if (template[i].ksize > MAX_KEYLEN) {
				pr_err("alg: hash: setkey failed on test %d for %s: key size %d > %d\n",
				       j, algo, template[i].ksize, MAX_KEYLEN);
				ret = -EINVAL;
				goto out;
			}
			memcpy(key, template[i].key, template[i].ksize);
			ret = crypto_ahash_setkey(tfm, key, template[i].ksize);
			if (ret) {
				printk(KERN_ERR "alg: hash: setkey failed on "
				       "test %d for %s: ret=%d\n", j, algo,
				       -ret);
				goto out;
			}
		}

		ahash_request_set_crypt(req, sg, result, template[i].psize);
		if (use_digest) {
			ret = wait_async_op(&tresult, crypto_ahash_digest(req));
			if (ret) {
				pr_err("alg: hash: digest failed on test %d "
				       "for %s: ret=%d\n", j, algo, -ret);
				goto out;
			}
		} else {
			ret = wait_async_op(&tresult, crypto_ahash_init(req));
			if (ret) {
				pr_err("alg: hash: init failed on test %d "
				       "for %s: ret=%d\n", j, algo, -ret);
				goto out;
			}
			ret = wait_async_op(&tresult, crypto_ahash_update(req));
			if (ret) {
				pr_err("alg: hash: update failed on test %d "
				       "for %s: ret=%d\n", j, algo, -ret);
				goto out;
			}
			ret = wait_async_op(&tresult, crypto_ahash_final(req));
			if (ret) {
				pr_err("alg: hash: final failed on test %d "
				       "for %s: ret=%d\n", j, algo, -ret);
				goto out;
			}
		}

		if (memcmp(result, template[i].digest,
			   crypto_ahash_digestsize(tfm))) {
			printk(KERN_ERR "alg: hash: Test %d failed for %s\n",
			       j, algo);
			hexdump(result, crypto_ahash_digestsize(tfm));
			ret = -EINVAL;
			goto out;
		}
	}

	j = 0;
	for (i = 0; i < tcount; i++) {
		/* alignment tests are only done with continuous buffers */
		if (align_offset != 0)
			break;

		if (!template[i].np)
			continue;

		j++;
		memset(result, 0, MAX_DIGEST_SIZE);

		temp = 0;
		sg_init_table(sg, template[i].np);
		ret = -EINVAL;
		for (k = 0; k < template[i].np; k++) {
			if (WARN_ON(offset_in_page(IDX[k]) +
				    template[i].tap[k] > PAGE_SIZE))
				goto out;
			sg_set_buf(&sg[k],
				   memcpy(xbuf[IDX[k] >> PAGE_SHIFT] +
					  offset_in_page(IDX[k]),
					  template[i].plaintext + temp,
					  template[i].tap[k]),
				   template[i].tap[k]);
			temp += template[i].tap[k];
		}

		if (template[i].ksize) {
			if (template[i].ksize > MAX_KEYLEN) {
				pr_err("alg: hash: setkey failed on test %d for %s: key size %d > %d\n",
				       j, algo, template[i].ksize, MAX_KEYLEN);
				ret = -EINVAL;
				goto out;
			}
			crypto_ahash_clear_flags(tfm, ~0);
			memcpy(key, template[i].key, template[i].ksize);
			ret = crypto_ahash_setkey(tfm, key, template[i].ksize);

			if (ret) {
				printk(KERN_ERR "alg: hash: setkey "
				       "failed on chunking test %d "
				       "for %s: ret=%d\n", j, algo, -ret);
				goto out;
			}
		}

		ahash_request_set_crypt(req, sg, result, template[i].psize);
		ret = crypto_ahash_digest(req);
		switch (ret) {
		case 0:
			break;
		case -EINPROGRESS:
		case -EBUSY:
			wait_for_completion(&tresult.completion);
			reinit_completion(&tresult.completion);
			ret = tresult.err;
			if (!ret)
				break;
			/* fall through */
		default:
			printk(KERN_ERR "alg: hash: digest failed "
			       "on chunking test %d for %s: "
			       "ret=%d\n", j, algo, -ret);
			goto out;
		}

		if (memcmp(result, template[i].digest,
			   crypto_ahash_digestsize(tfm))) {
			printk(KERN_ERR "alg: hash: Chunking test %d "
			       "failed for %s\n", j, algo);
			hexdump(result, crypto_ahash_digestsize(tfm));
			ret = -EINVAL;
			goto out;
		}
	}

	ret = 0;

out:
	ahash_request_free(req);
out_noreq:
	testmgr_free_buf(xbuf);
out_nobuf:
	kfree(key);
	kfree(result);
	return ret;
}

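/* Run __test_hash() over aligned, one-byte-offset, and alignmask offsets. */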
static int test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
		     unsigned int tcount, bool use_digest)
{
	unsigned int alignmask;
	int ret;

	ret = __test_hash(tfm, template, tcount, use_digest, 0);
	if (ret)
		return ret;

	/* test unaligned buffers, check with one byte offset */
	ret = __test_hash(tfm, template, tcount, use_digest, 1);
	if (ret)
		return ret;

	alignmask = crypto_tfm_alg_alignmask(&tfm->base);
	if (alignmask) {
		/* Check if alignment mask for tfm is correctly set. */
		ret = __test_hash(tfm, template, tcount, use_digest,
				  alignmask + 1);
		if (ret)
			return ret;
	}

	return 0;
}

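/*
 * Run AEAD template vectors in the given direction, first on contiguous
 * buffers and then as scatter-gather "chunk" tests; diff_dst selects a
 * separate destination buffer and align_offset misaligns the data.
 */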
static int __test_aead(struct crypto_aead *tfm, int enc,
		       struct aead_testvec *template, unsigned int tcount,
		       const bool diff_dst, const int align_offset)
{
	const char *algo = crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm));
	unsigned int i, j, k, n, temp;
	int ret = -ENOMEM;
	char *q;
	char *key;
	struct aead_request *req;
	struct scatterlist *sg;
	struct scatterlist *sgout;
	const char *e, *d;
	struct tcrypt_result result;
	unsigned int authsize, iv_len;
	void *input;
	void *output;
	void *assoc;
	char *iv;
	char *xbuf[XBUFSIZE];
	char *xoutbuf[XBUFSIZE];
	char *axbuf[XBUFSIZE];

	iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
	if (!iv)
		return ret;
	key = kmalloc(MAX_KEYLEN, GFP_KERNEL);
	if (!key)
		goto out_noxbuf;
	if (testmgr_alloc_buf(xbuf))
		goto out_noxbuf;
	if (testmgr_alloc_buf(axbuf))
		goto out_noaxbuf;
	if (diff_dst && testmgr_alloc_buf(xoutbuf))
		goto out_nooutbuf;

	/* avoid "the frame size is larger than 1024 bytes" compiler warning */
	sg = kmalloc(sizeof(*sg) * 8 * (diff_dst ? 4 : 2), GFP_KERNEL);
	if (!sg)
		goto out_nosg;
	sgout = &sg[16];

	if (diff_dst)
		d = "-ddst";
	else
		d = "";

	if (enc == ENCRYPT)
		e = "encryption";
	else
		e = "decryption";

	init_completion(&result.completion);

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		pr_err("alg: aead%s: Failed to allocate request for %s\n",
		       d, algo);
		goto out;
	}

	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  tcrypt_complete, &result);

	for (i = 0, j = 0; i < tcount; i++) {
		if (template[i].np)
			continue;

		j++;

		/* some templates have no input data but they will
		 * touch input
		 */
		input = xbuf[0];
		input += align_offset;
		assoc = axbuf[0];

		ret = -EINVAL;
		if (WARN_ON(align_offset + template[i].ilen >
			    PAGE_SIZE || template[i].alen > PAGE_SIZE))
			goto out;

		memcpy(input, template[i].input, template[i].ilen);
		memcpy(assoc, template[i].assoc, template[i].alen);
		iv_len = crypto_aead_ivsize(tfm);
		if (template[i].iv)
			memcpy(iv, template[i].iv, iv_len);
		else
			memset(iv, 0, iv_len);

		crypto_aead_clear_flags(tfm, ~0);
		if (template[i].wk)
			crypto_aead_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);

		if (template[i].klen > MAX_KEYLEN) {
			pr_err("alg: aead%s: setkey failed on test %d for %s: key size %d > %d\n",
			       d, j, algo, template[i].klen,
			       MAX_KEYLEN);
			ret = -EINVAL;
			goto out;
		}
		memcpy(key, template[i].key, template[i].klen);

		ret = crypto_aead_setkey(tfm, key, template[i].klen);
		if (!ret == template[i].fail) {
			pr_err("alg: aead%s: setkey failed on test %d for %s: flags=%x\n",
			       d, j, algo, crypto_aead_get_flags(tfm));
			goto out;
		} else if (ret)
			continue;

		authsize = abs(template[i].rlen - template[i].ilen);
		ret = crypto_aead_setauthsize(tfm, authsize);
		if (ret) {
			pr_err("alg: aead%s: Failed to set authsize to %u on test %d for %s\n",
			       d, authsize, j, algo);
			goto out;
		}

		k = !!template[i].alen;
		sg_init_table(sg, k + 1);
		sg_set_buf(&sg[0], assoc, template[i].alen);
		sg_set_buf(&sg[k], input,
			   template[i].ilen + (enc ? authsize : 0));
		output = input;

		if (diff_dst) {
			sg_init_table(sgout, k + 1);
			sg_set_buf(&sgout[0], assoc, template[i].alen);

			output = xoutbuf[0];
			output += align_offset;
			sg_set_buf(&sgout[k], output,
				   template[i].rlen + (enc ? 0 : authsize));
		}

		aead_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
				       template[i].ilen, iv);

		aead_request_set_ad(req, template[i].alen);

		ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);

		switch (ret) {
		case 0:
			if (template[i].novrfy) {
				/* verification was supposed to fail */
				pr_err("alg: aead%s: %s failed on test %d for %s: ret was 0, expected -EBADMSG\n",
				       d, e, j, algo);
				/* so really, we got a bad message */
				ret = -EBADMSG;
				goto out;
			}
			break;
		case -EINPROGRESS:
		case -EBUSY:
			wait_for_completion(&result.completion);
			reinit_completion(&result.completion);
			ret = result.err;
			if (!ret)
				break;
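			/* fall through */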
		case -EBADMSG:
			if (template[i].novrfy)
				/* verification failure was expected */
				continue;
			/* fall through */
		default:
			pr_err("alg: aead%s: %s failed on test %d for %s: ret=%d\n",
			       d, e, j, algo, -ret);
			goto out;
		}

		q = output;
		if (memcmp(q, template[i].result, template[i].rlen)) {
			pr_err("alg: aead%s: Test %d failed on %s for %s\n",
			       d, j, e, algo);
			hexdump(q, template[i].rlen);
			ret = -EINVAL;
			goto out;
		}
	}

	for (i = 0, j = 0; i < tcount; i++) {
		/* alignment tests are only done with continuous buffers */
		if (align_offset != 0)
			break;

		if (!template[i].np)
			continue;

		j++;

		if (template[i].iv)
			memcpy(iv, template[i].iv, MAX_IVLEN);
		else
			memset(iv, 0, MAX_IVLEN);

		crypto_aead_clear_flags(tfm, ~0);
		if (template[i].wk)
			crypto_aead_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
		if (template[i].klen > MAX_KEYLEN) {
			pr_err("alg: aead%s: setkey failed on test %d for %s: key size %d > %d\n",
			       d, j, algo, template[i].klen, MAX_KEYLEN);
			ret = -EINVAL;
			goto out;
		}
		memcpy(key, template[i].key, template[i].klen);

		ret = crypto_aead_setkey(tfm, key, template[i].klen);
		if (!ret == template[i].fail) {
			pr_err("alg: aead%s: setkey failed on chunk test %d for %s: flags=%x\n",
			       d, j, algo, crypto_aead_get_flags(tfm));
			goto out;
		} else if (ret)
			continue;

		authsize = abs(template[i].rlen - template[i].ilen);

		ret = -EINVAL;
		sg_init_table(sg, template[i].anp + template[i].np);
		if (diff_dst)
			sg_init_table(sgout, template[i].anp + template[i].np);

		ret = -EINVAL;
		for (k = 0, temp = 0; k < template[i].anp; k++) {
			if (WARN_ON(offset_in_page(IDX[k]) +
				    template[i].atap[k] > PAGE_SIZE))
				goto out;
			sg_set_buf(&sg[k],
				   memcpy(axbuf[IDX[k] >> PAGE_SHIFT] +
					  offset_in_page(IDX[k]),
					  template[i].assoc + temp,
					  template[i].atap[k]),
				   template[i].atap[k]);
			if (diff_dst)
				sg_set_buf(&sgout[k],
					   axbuf[IDX[k] >> PAGE_SHIFT] +
					   offset_in_page(IDX[k]),
					   template[i].atap[k]);
			temp += template[i].atap[k];
		}

		for (k = 0, temp = 0; k < template[i].np; k++) {
			if (WARN_ON(offset_in_page(IDX[k]) +
				    template[i].tap[k] > PAGE_SIZE))
				goto out;

			q = xbuf[IDX[k] >> PAGE_SHIFT] + offset_in_page(IDX[k]);
			memcpy(q, template[i].input + temp, template[i].tap[k]);
			sg_set_buf(&sg[template[i].anp + k],
				   q, template[i].tap[k]);

			if (diff_dst) {
				q = xoutbuf[IDX[k] >> PAGE_SHIFT] +
				    offset_in_page(IDX[k]);

				memset(q, 0, template[i].tap[k]);

				sg_set_buf(&sgout[template[i].anp + k],
					   q, template[i].tap[k]);
			}

			n = template[i].tap[k];
			if (k == template[i].np - 1 && enc)
				n += authsize;
			if (offset_in_page(q) + n < PAGE_SIZE)
				q[n] = 0;

			temp += template[i].tap[k];
		}

		ret = crypto_aead_setauthsize(tfm, authsize);
		if (ret) {
			pr_err("alg: aead%s: Failed to set authsize to %u on chunk test %d for %s\n",
			       d, authsize, j, algo);
			goto out;
		}

		if (enc) {
			if (WARN_ON(sg[template[i].anp + k - 1].offset +
				    sg[template[i].anp + k - 1].length +
				    authsize > PAGE_SIZE)) {
				ret = -EINVAL;
				goto out;
			}

			if (diff_dst)
				sgout[template[i].anp + k - 1].length +=
					authsize;
			sg[template[i].anp + k - 1].length += authsize;
		}

		aead_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
				       template[i].ilen,
				       iv);

		aead_request_set_ad(req, template[i].alen);

		ret = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);

		switch (ret) {
		case 0:
			if (template[i].novrfy) {
				/* verification was supposed to fail */
				pr_err("alg: aead%s: %s failed on chunk test %d for %s: ret was 0, expected -EBADMSG\n",
				       d, e, j, algo);
				/* so really, we got a bad message */
				ret = -EBADMSG;
				goto out;
			}
			break;
		case -EINPROGRESS:
		case -EBUSY:
			wait_for_completion(&result.completion);
			reinit_completion(&result.completion);
			ret = result.err;
			if (!ret)
				break;
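			/* fall through */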
		case -EBADMSG:
			if (template[i].novrfy)
				/* verification failure was expected */
				continue;
			/* fall through */
		default:
			pr_err("alg: aead%s: %s failed on chunk test %d for %s: ret=%d\n",
			       d, e, j, algo, -ret);
			goto out;
		}

		ret = -EINVAL;
		for (k = 0, temp = 0; k < template[i].np; k++) {
			if (diff_dst)
				q = xoutbuf[IDX[k] >> PAGE_SHIFT] +
				    offset_in_page(IDX[k]);
			else
				q = xbuf[IDX[k] >> PAGE_SHIFT] +
				    offset_in_page(IDX[k]);

			n = template[i].tap[k];
			if (k == template[i].np - 1)
				n += enc ? authsize : -authsize;

			if (memcmp(q, template[i].result + temp, n)) {
				pr_err("alg: aead%s: Chunk test %d failed on %s at page %u for %s\n",
				       d, j, e, k, algo);
				hexdump(q, n);
				goto out;
			}

			q += n;
			if (k == template[i].np - 1 && !enc) {
				if (!diff_dst &&
					memcmp(q, template[i].input +
					      temp + n, authsize))
					n = authsize;
				else
					n = 0;
			} else {
				for (n = 0; offset_in_page(q + n) && q[n]; n++)
					;
			}
			if (n) {
				pr_err("alg: aead%s: Result buffer corruption in chunk test %d on %s at page %u for %s: %u bytes:\n",
				       d, j, e, k, algo, n);
				hexdump(q, n);
				goto out;
			}

			temp += template[i].tap[k];
		}
	}

	ret = 0;

out:
	aead_request_free(req);
	kfree(sg);
out_nosg:
	if (diff_dst)
		testmgr_free_buf(xoutbuf);
out_nooutbuf:
	testmgr_free_buf(axbuf);
out_noaxbuf:
	testmgr_free_buf(xbuf);
out_noxbuf:
	kfree(key);
	kfree(iv);
	return ret;
}

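/* Drive __test_aead() over the src==dst, src!=dst, and misaligned cases. */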
static int test_aead(struct crypto_aead *tfm, int enc,
		     struct aead_testvec *template, unsigned int tcount)
{
	unsigned int alignmask;
	int ret;

	/* test 'dst == src' case */
	ret = __test_aead(tfm, enc, template, tcount, false, 0);
	if (ret)
		return ret;

	/* test 'dst != src' case */
	ret = __test_aead(tfm, enc, template, tcount, true, 0);
	if (ret)
		return ret;

	/* test unaligned buffers, check with one byte offset */
	ret = __test_aead(tfm, enc, template, tcount, true, 1);
	if (ret)
		return ret;

	alignmask = crypto_tfm_alg_alignmask(&tfm->base);
	if (alignmask) {
		/* Check if alignment mask for tfm is correctly set. */
		ret = __test_aead(tfm, enc, template, tcount, true,
				  alignmask + 1);
		if (ret)
			return ret;
	}

	return 0;
}

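/* Test the synchronous block-cipher API, one block at a time, in place. */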
static int test_cipher(struct crypto_cipher *tfm, int enc,
		       struct cipher_testvec *template, unsigned int tcount)
{
	const char *algo = crypto_tfm_alg_driver_name(crypto_cipher_tfm(tfm));
	unsigned int i, j, k;
	char *q;
	const char *e;
	void *data;
	char *xbuf[XBUFSIZE];
	int ret = -ENOMEM;

	if (testmgr_alloc_buf(xbuf))
		goto out_nobuf;

	if (enc == ENCRYPT)
		e = "encryption";
	else
		e = "decryption";

	j = 0;
	for (i = 0; i < tcount; i++) {
		if (template[i].np)
			continue;

		j++;

		ret = -EINVAL;
		if (WARN_ON(template[i].ilen > PAGE_SIZE))
			goto out;

		data = xbuf[0];
		memcpy(data, template[i].input, template[i].ilen);

		crypto_cipher_clear_flags(tfm, ~0);
		if (template[i].wk)
			crypto_cipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);

		ret = crypto_cipher_setkey(tfm, template[i].key,
					   template[i].klen);
		if (!ret == template[i].fail) {
			printk(KERN_ERR "alg: cipher: setkey failed "
			       "on test %d for %s: flags=%x\n", j,
			       algo, crypto_cipher_get_flags(tfm));
			goto out;
		} else if (ret)
			continue;

		for (k = 0; k < template[i].ilen;
		     k += crypto_cipher_blocksize(tfm)) {
			if (enc)
				crypto_cipher_encrypt_one(tfm, data + k,
							  data + k);
			else
				crypto_cipher_decrypt_one(tfm, data + k,
							  data + k);
		}

		q = data;
		if (memcmp(q, template[i].result, template[i].rlen)) {
			printk(KERN_ERR "alg: cipher: Test %d failed "
			       "on %s for %s\n", j, e, algo);
			hexdump(q, template[i].rlen);
			ret = -EINVAL;
			goto out;
		}
	}

	ret = 0;

out:
	testmgr_free_buf(xbuf);
out_nobuf:
	return ret;
}

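/*
 * Run skcipher template vectors in the given direction, on contiguous
 * buffers and then as scatter-gather "chunk" tests; also verifies the
 * output IV when the vector provides one.
 */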
static int __test_skcipher(struct crypto_skcipher *tfm, int enc,
			   struct cipher_testvec *template, unsigned int tcount,
			   const bool diff_dst, const int align_offset)
{
	const char *algo =
		crypto_tfm_alg_driver_name(crypto_skcipher_tfm(tfm));
	unsigned int i, j, k, n, temp;
	char *q;
	struct skcipher_request *req;
	struct scatterlist sg[8];
	struct scatterlist sgout[8];
	const char *e, *d;
	struct tcrypt_result result;
	void *data;
	char iv[MAX_IVLEN];
	char *xbuf[XBUFSIZE];
	char *xoutbuf[XBUFSIZE];
	int ret = -ENOMEM;
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);

	if (testmgr_alloc_buf(xbuf))
		goto out_nobuf;

	if (diff_dst && testmgr_alloc_buf(xoutbuf))
		goto out_nooutbuf;

	if (diff_dst)
		d = "-ddst";
	else
		d = "";

	if (enc == ENCRYPT)
		e = "encryption";
	else
		e = "decryption";

	init_completion(&result.completion);

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		pr_err("alg: skcipher%s: Failed to allocate request for %s\n",
		       d, algo);
		goto out;
	}

	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      tcrypt_complete, &result);

	j = 0;
	for (i = 0; i < tcount; i++) {
		if (template[i].np && !template[i].also_non_np)
			continue;

		if (template[i].iv)
			memcpy(iv, template[i].iv, ivsize);
		else
			memset(iv, 0, MAX_IVLEN);

		j++;
		ret = -EINVAL;
		if (WARN_ON(align_offset + template[i].ilen > PAGE_SIZE))
			goto out;

		data = xbuf[0];
		data += align_offset;
		memcpy(data, template[i].input, template[i].ilen);

		crypto_skcipher_clear_flags(tfm, ~0);
		if (template[i].wk)
			crypto_skcipher_set_flags(tfm,
						  CRYPTO_TFM_REQ_WEAK_KEY);

		ret = crypto_skcipher_setkey(tfm, template[i].key,
					     template[i].klen);
		if (!ret == template[i].fail) {
			pr_err("alg: skcipher%s: setkey failed on test %d for %s: flags=%x\n",
			       d, j, algo, crypto_skcipher_get_flags(tfm));
			goto out;
		} else if (ret)
			continue;

		sg_init_one(&sg[0], data, template[i].ilen);
		if (diff_dst) {
			data = xoutbuf[0];
			data += align_offset;
			sg_init_one(&sgout[0], data, template[i].ilen);
		}

		skcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
					   template[i].ilen, iv);
		ret = enc ? crypto_skcipher_encrypt(req) :
			    crypto_skcipher_decrypt(req);

		switch (ret) {
		case 0:
			break;
		case -EINPROGRESS:
		case -EBUSY:
			wait_for_completion(&result.completion);
			reinit_completion(&result.completion);
			ret = result.err;
			if (!ret)
				break;
			/* fall through */
		default:
			pr_err("alg: skcipher%s: %s failed on test %d for %s: ret=%d\n",
			       d, e, j, algo, -ret);
			goto out;
		}

		q = data;
		if (memcmp(q, template[i].result, template[i].rlen)) {
			pr_err("alg: skcipher%s: Test %d failed (invalid result) on %s for %s\n",
			       d, j, e, algo);
			hexdump(q, template[i].rlen);
			ret = -EINVAL;
			goto out;
		}

		if (template[i].iv_out &&
		    memcmp(iv, template[i].iv_out,
			   crypto_skcipher_ivsize(tfm))) {
			pr_err("alg: skcipher%s: Test %d failed (invalid output IV) on %s for %s\n",
			       d, j, e, algo);
			hexdump(iv, crypto_skcipher_ivsize(tfm));
			ret = -EINVAL;
			goto out;
		}
	}

	j = 0;
	for (i = 0; i < tcount; i++) {
		/* alignment tests are only done with continuous buffers */
		if (align_offset != 0)
			break;

		if (!template[i].np)
			continue;

		if (template[i].iv)
			memcpy(iv, template[i].iv, ivsize);
		else
			memset(iv, 0, MAX_IVLEN);

		j++;
		crypto_skcipher_clear_flags(tfm, ~0);
		if (template[i].wk)
			crypto_skcipher_set_flags(tfm,
						  CRYPTO_TFM_REQ_WEAK_KEY);

		ret = crypto_skcipher_setkey(tfm, template[i].key,
					     template[i].klen);
		if (!ret == template[i].fail) {
			pr_err("alg: skcipher%s: setkey failed on chunk test %d for %s: flags=%x\n",
			       d, j, algo, crypto_skcipher_get_flags(tfm));
			goto out;
		} else if (ret)
			continue;

		temp = 0;
		ret = -EINVAL;
		sg_init_table(sg, template[i].np);
		if (diff_dst)
			sg_init_table(sgout, template[i].np);
		for (k = 0; k < template[i].np; k++) {
			if (WARN_ON(offset_in_page(IDX[k]) +
				    template[i].tap[k] > PAGE_SIZE))
				goto out;

			q = xbuf[IDX[k] >> PAGE_SHIFT] + offset_in_page(IDX[k]);

			memcpy(q, template[i].input + temp, template[i].tap[k]);

			if (offset_in_page(q) + template[i].tap[k] < PAGE_SIZE)
				q[template[i].tap[k]] = 0;

			sg_set_buf(&sg[k], q, template[i].tap[k]);
			if (diff_dst) {
				q = xoutbuf[IDX[k] >> PAGE_SHIFT] +
				    offset_in_page(IDX[k]);

				sg_set_buf(&sgout[k], q, template[i].tap[k]);

				memset(q, 0, template[i].tap[k]);
				if (offset_in_page(q) +
				    template[i].tap[k] < PAGE_SIZE)
					q[template[i].tap[k]] = 0;
			}

			temp += template[i].tap[k];
		}

		skcipher_request_set_crypt(req, sg, (diff_dst) ? sgout : sg,
					   template[i].ilen, iv);

		ret = enc ? crypto_skcipher_encrypt(req) :
			    crypto_skcipher_decrypt(req);

		switch (ret) {
		case 0:
			break;
		case -EINPROGRESS:
		case -EBUSY:
			wait_for_completion(&result.completion);
			reinit_completion(&result.completion);
			ret = result.err;
			if (!ret)
				break;
			/* fall through */
		default:
			pr_err("alg: skcipher%s: %s failed on chunk test %d for %s: ret=%d\n",
			       d, e, j, algo, -ret);
			goto out;
		}

		temp = 0;
		ret = -EINVAL;
		for (k = 0; k < template[i].np; k++) {
			if (diff_dst)
				q = xoutbuf[IDX[k] >> PAGE_SHIFT] +
				    offset_in_page(IDX[k]);
			else
				q = xbuf[IDX[k] >> PAGE_SHIFT] +
				    offset_in_page(IDX[k]);

			if (memcmp(q, template[i].result + temp,
				   template[i].tap[k])) {
				pr_err("alg: skcipher%s: Chunk test %d failed on %s at page %u for %s\n",
				       d, j, e, k, algo);
				hexdump(q, template[i].tap[k]);
				goto out;
			}

			q += template[i].tap[k];
			for (n = 0; offset_in_page(q + n) && q[n]; n++)
				;
			if (n) {
				pr_err("alg: skcipher%s: Result buffer corruption in chunk test %d on %s at page %u for %s: %u bytes:\n",
				       d, j, e, k, algo, n);
				hexdump(q, n);
				goto out;
			}
			temp += template[i].tap[k];
		}
	}

	ret = 0;

out:
	skcipher_request_free(req);
	if (diff_dst)
		testmgr_free_buf(xoutbuf);
out_nooutbuf:
	testmgr_free_buf(xbuf);
out_nobuf:
	return ret;
}

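/* Drive __test_skcipher() over the src==dst, src!=dst, and misaligned cases. */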
static int test_skcipher(struct crypto_skcipher *tfm, int enc,
			 struct cipher_testvec *template, unsigned int tcount)
{
	unsigned int alignmask;
	int ret;

	/* test 'dst == src' case */
	ret = __test_skcipher(tfm, enc, template, tcount, false, 0);
	if (ret)
		return ret;

	/* test 'dst != src' case */
	ret = __test_skcipher(tfm, enc, template, tcount, true, 0);
	if (ret)
		return ret;

	/* test unaligned buffers, check with one byte offset */
	ret = __test_skcipher(tfm, enc, template, tcount, true, 1);
	if (ret)
		return ret;

	alignmask = crypto_tfm_alg_alignmask(&tfm->base);
	if (alignmask) {
		/* Check if alignment mask for tfm is correctly set. */
		ret = __test_skcipher(tfm, enc, template, tcount, true,
				      alignmask + 1);
		if (ret)
			return ret;
	}

	return 0;
}

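/* Compress and decompress the template vectors with the synchronous comp API. */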
static int test_comp(struct crypto_comp *tfm, struct comp_testvec *ctemplate,
		     struct comp_testvec *dtemplate, int ctcount, int dtcount)
{
	const char *algo = crypto_tfm_alg_driver_name(crypto_comp_tfm(tfm));
	unsigned int i;
	char result[COMP_BUF_SIZE];
	int ret;

	for (i = 0; i < ctcount; i++) {
		int ilen;
		unsigned int dlen = COMP_BUF_SIZE;

		memset(result, 0, sizeof (result));

		ilen = ctemplate[i].inlen;
		ret = crypto_comp_compress(tfm, ctemplate[i].input,
		                           ilen, result, &dlen);
		if (ret) {
			printk(KERN_ERR "alg: comp: compression failed "
			       "on test %d for %s: ret=%d\n", i + 1, algo,
			       -ret);
			goto out;
		}

		if (dlen != ctemplate[i].outlen) {
			printk(KERN_ERR "alg: comp: Compression test %d "
			       "failed for %s: output len = %d\n", i + 1, algo,
			       dlen);
			ret = -EINVAL;
			goto out;
		}

		if (memcmp(result, ctemplate[i].output, dlen)) {
			printk(KERN_ERR "alg: comp: Compression test %d "
			       "failed for %s\n", i + 1, algo);
			hexdump(result, dlen);
			ret = -EINVAL;
			goto out;
		}
	}

	for (i = 0; i < dtcount; i++) {
		int ilen;
		unsigned int dlen = COMP_BUF_SIZE;

		memset(result, 0, sizeof (result));

		ilen = dtemplate[i].inlen;
		ret = crypto_comp_decompress(tfm, dtemplate[i].input,
		                             ilen, result, &dlen);
		if (ret) {
			printk(KERN_ERR "alg: comp: decompression failed "
			       "on test %d for %s: ret=%d\n", i + 1, algo,
			       -ret);
			goto out;
		}

		if (dlen != dtemplate[i].outlen) {
			printk(KERN_ERR "alg: comp: Decompression test %d "
			       "failed for %s: output len = %d\n", i + 1, algo,
			       dlen);
			ret = -EINVAL;
			goto out;
		}

		if (memcmp(result, dtemplate[i].output, dlen)) {
			printk(KERN_ERR "alg: comp: Decompression test %d "
			       "failed for %s\n", i + 1, algo);
			hexdump(result, dlen);
			ret = -EINVAL;
			goto out;
		}
	}

	ret = 0;

out:
	return ret;
}

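/*
 * Test the partial (streaming) compression API: feed each vector in two
 * halves through setup/init/update and check the produced output.
 */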
static int test_pcomp(struct crypto_pcomp *tfm,
		      struct pcomp_testvec *ctemplate,
		      struct pcomp_testvec *dtemplate, int ctcount,
		      int dtcount)
{
	const char *algo = crypto_tfm_alg_driver_name(crypto_pcomp_tfm(tfm));
	unsigned int i;
	char result[COMP_BUF_SIZE];
	int res;

	for (i = 0; i < ctcount; i++) {
		struct comp_request req;
		unsigned int produced = 0;

		res = crypto_compress_setup(tfm, ctemplate[i].params,
					    ctemplate[i].paramsize);
		if (res) {
			pr_err("alg: pcomp: compression setup failed on test "
			       "%d for %s: error=%d\n", i + 1, algo, res);
			return res;
		}

		res = crypto_compress_init(tfm);
		if (res) {
			pr_err("alg: pcomp: compression init failed on test "
			       "%d for %s: error=%d\n", i + 1, algo, res);
			return res;
		}

		memset(result, 0, sizeof(result));

		req.next_in = ctemplate[i].input;
		req.avail_in = ctemplate[i].inlen / 2;
		req.next_out = result;
		req.avail_out = ctemplate[i].outlen / 2;

		res = crypto_compress_update(tfm, &req);
		if (res < 0 && (res != -EAGAIN || req.avail_in)) {
			pr_err("alg: pcomp: compression update failed on test "
			       "%d for %s: error=%d\n", i + 1, algo, res);
			return res;
		}
		if (res > 0)
			produced += res;

		/* Add remaining input data */
		req.avail_in += (ctemplate[i].inlen + 1) / 2;