/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Based on the following paper:
 * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
 *
 * Code partially derived from nft_hash
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>

#define HASH_DEFAULT_SIZE	64UL
#define HASH_MIN_SIZE		4UL
#define BUCKET_LOCKS_PER_CPU   128UL

/* Base bits plus 1 bit for nulls marker */
#define HASH_RESERVED_SPACE	(RHT_BASE_BITS + 1)

enum {
	RHT_LOCK_NORMAL,
	RHT_LOCK_NESTED,
	RHT_LOCK_NESTED2,
};

/* The bucket lock is selected based on the hash and protects mutations
 * on a group of hash buckets.
 *
 * IMPORTANT: When holding the bucket lock of both the old and new table
 * during expansions and shrinking, the old bucket lock must always be
 * acquired first.
 */
static spinlock_t *bucket_lock(const struct bucket_table *tbl, u32 hash)
{
	return &tbl->locks[hash & tbl->locks_mask];
}

#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
#define ASSERT_BUCKET_LOCK(TBL, HASH) \
	BUG_ON(!lockdep_rht_bucket_is_held(TBL, HASH))

#ifdef CONFIG_PROVE_LOCKING
int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return (debug_locks) ? lockdep_is_held(&ht->mutex) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);

int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash)
{
	spinlock_t *lock = bucket_lock(tbl, hash);

	return (debug_locks) ? lockdep_is_held(lock) : 1;
}
EXPORT_SYMBOL_GPL(lockdep_rht_bucket_is_held);
#endif

static void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
{
	return (void *) he - ht->p.head_offset;
}

static u32 rht_bucket_index(const struct bucket_table *tbl, u32 hash)
{
	return hash & (tbl->size - 1);
}

static u32 obj_raw_hashfn(const struct rhashtable *ht, const void *ptr)
{
	u32 hash;

	if (unlikely(!ht->p.key_len))
		hash = ht->p.obj_hashfn(ptr, ht->p.hash_rnd);
	else
		hash = ht->p.hashfn(ptr + ht->p.key_offset, ht->p.key_len,
				    ht->p.hash_rnd);

	return hash >> HASH_RESERVED_SPACE;
}

static u32 key_hashfn(struct rhashtable *ht, const void *key, u32 len)
{
	struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
	u32 hash;

	hash = ht->p.hashfn(key, len, ht->p.hash_rnd);
	hash >>= HASH_RESERVED_SPACE;

	return rht_bucket_index(tbl, hash);
}

static u32 head_hashfn(const struct rhashtable *ht,
		       const struct bucket_table *tbl,
		       const struct rhash_head *he)
{
	return rht_bucket_index(tbl, obj_raw_hashfn(ht, rht_obj(ht, he)));
}

static struct rhash_head __rcu **bucket_tail(struct bucket_table *tbl, u32 n)
{
	struct rhash_head __rcu **pprev;

	for (pprev = &tbl->buckets[n];
	     !rht_is_a_nulls(rht_dereference_bucket(*pprev, tbl, n));
	     pprev = &rht_dereference_bucket(*pprev, tbl, n)->next)
		;

	return pprev;
}

static int alloc_bucket_locks(struct rhashtable *ht, struct bucket_table *tbl)
{
	unsigned int i, size;
#if defined(CONFIG_PROVE_LOCKING)
	unsigned int nr_pcpus = 2;
#else
	unsigned int nr_pcpus = num_possible_cpus();
#endif

	nr_pcpus = min_t(unsigned int, nr_pcpus, 32UL);
	size = roundup_pow_of_two(nr_pcpus * ht->p.locks_mul);

	/* Never allocate more than one lock per bucket */
	size = min_t(unsigned int, size, tbl->size);

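	/* spinlock_t can be a zero sized structure when lock debugging
	 * and SMP are both disabled, in which case no lock array needs
	 * to be allocated at all.
	 */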
	if (sizeof(spinlock_t) != 0) {
#ifdef CONFIG_NUMA
		if (size * sizeof(spinlock_t) > PAGE_SIZE)
			tbl->locks = vmalloc(size * sizeof(spinlock_t));
		else
#endif
		tbl->locks = kmalloc_array(size, sizeof(spinlock_t),
					   GFP_KERNEL);
		if (!tbl->locks)
			return -ENOMEM;
		for (i = 0; i < size; i++)
			spin_lock_init(&tbl->locks[i]);
	}
	tbl->locks_mask = size - 1;

	return 0;
}

static void bucket_table_free(const struct bucket_table *tbl)
{
	if (tbl)
		kvfree(tbl->locks);

	kvfree(tbl);
}

static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
					       size_t nbuckets)
{
	struct bucket_table *tbl;
	size_t size;
	int i;

	size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
	tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (tbl == NULL)
		tbl = vzalloc(size);

	if (tbl == NULL)
		return NULL;

	tbl->size = nbuckets;

	if (alloc_bucket_locks(ht, tbl) < 0) {
		bucket_table_free(tbl);
		return NULL;
	}

	for (i = 0; i < nbuckets; i++)
		INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);

	return tbl;
}

/**
 * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
 * @ht:		hash table
 * @new_size:	new table size
 */
bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
{
	/* Expand table when exceeding 75% load */
	return atomic_read(&ht->nelems) > (new_size / 4 * 3) &&
	       (ht->p.max_shift && atomic_read(&ht->shift) < ht->p.max_shift);
}
EXPORT_SYMBOL_GPL(rht_grow_above_75);

/**
 * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
 * @ht:		hash table
 * @new_size:	new table size
 */
bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
{
	/* Shrink table beneath 30% load */
	return atomic_read(&ht->nelems) < (new_size * 3 / 10) &&
	       (atomic_read(&ht->shift) > ht->p.min_shift);
}
EXPORT_SYMBOL_GPL(rht_shrink_below_30);
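
/* Example (an illustrative sketch, mirroring the self test below): the
 * two decision functions above are not called directly but are handed to
 * rhashtable_init() via struct rhashtable_params:
 *
 *	struct rhashtable_params params = {
 *		...
 *		.grow_decision		= rht_grow_above_75,
 *		.shrink_decision	= rht_shrink_below_30,
 *	};
 *
 * The deferred worker then consults them after insertions and removals
 * to decide whether the table should be resized.
 */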

static void hashtable_chain_unzip(const struct rhashtable *ht,
				  const struct bucket_table *new_tbl,
				  struct bucket_table *old_tbl,
				  size_t old_hash)
{
	struct rhash_head *he, *p, *next;
	spinlock_t *new_bucket_lock, *new_bucket_lock2 = NULL;
	unsigned int new_hash, new_hash2;

	ASSERT_BUCKET_LOCK(old_tbl, old_hash);

	/* Old bucket empty, no work needed. */
	p = rht_dereference_bucket(old_tbl->buckets[old_hash], old_tbl,
				   old_hash);
	if (rht_is_a_nulls(p))
		return;

	new_hash = new_hash2 = head_hashfn(ht, new_tbl, p);
	new_bucket_lock = bucket_lock(new_tbl, new_hash);

	/* Advance the old bucket pointer one or more times until it
	 * reaches a node that doesn't hash to the same bucket in the
	 * new table as the chain head; call the node before it p.
	 */
	rht_for_each_continue(he, p->next, old_tbl, old_hash) {
		new_hash2 = head_hashfn(ht, new_tbl, he);
		if (new_hash != new_hash2)
			break;
		p = he;
	}
	rcu_assign_pointer(old_tbl->buckets[old_hash], p->next);

	spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);

	/* If we have encountered an entry that maps to a different bucket in
	 * the new table, lock down that bucket as well as we might cut off
	 * the end of the chain.
	 */
	new_bucket_lock2 = bucket_lock(new_tbl, new_hash);
	if (new_bucket_lock != new_bucket_lock2)
		spin_lock_bh_nested(new_bucket_lock2, RHT_LOCK_NESTED2);

	/* Find the subsequent node which does hash to the same
	 * bucket as node P, or NULL if no such node exists.
	 */
	INIT_RHT_NULLS_HEAD(next, ht, old_hash);
	if (!rht_is_a_nulls(he)) {
		rht_for_each_continue(he, he->next, old_tbl, old_hash) {
			if (head_hashfn(ht, new_tbl, he) == new_hash) {
				next = he;
				break;
			}
		}
	}

	/* Set p's next pointer to that subsequent node pointer,
	 * bypassing the nodes which do not hash to p's bucket
	 */
	rcu_assign_pointer(p->next, next);

	if (new_bucket_lock != new_bucket_lock2)
		spin_unlock_bh(new_bucket_lock2);
	spin_unlock_bh(new_bucket_lock);
}

static void link_old_to_new(struct bucket_table *new_tbl,
			    unsigned int new_hash, struct rhash_head *entry)
{
	spinlock_t *new_bucket_lock;

	new_bucket_lock = bucket_lock(new_tbl, new_hash);

	spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);
	rcu_assign_pointer(*bucket_tail(new_tbl, new_hash), entry);
	spin_unlock_bh(new_bucket_lock);
}

/**
 * rhashtable_expand - Expand hash table while allowing concurrent lookups
 * @ht:		the hash table to expand
 *
 * A secondary bucket array is allocated and the hash entries are migrated
 * while keeping them on both lists until the end of the RCU grace period.
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within an rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
int rhashtable_expand(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
	struct rhash_head *he;
	spinlock_t *old_bucket_lock;
	unsigned int new_hash, old_hash;
	bool complete = false;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, old_tbl->size * 2);
	if (new_tbl == NULL)
		return -ENOMEM;

	atomic_inc(&ht->shift);

	/* Make insertions go into the new, empty table right away. Deletions
	 * and lookups will be attempted in both tables until we synchronize.
	 * The synchronize_rcu() guarantees for the new table to be picked up
	 * so no new additions go into the old table while we relink.
	 */
	rcu_assign_pointer(ht->future_tbl, new_tbl);
	synchronize_rcu();

	/* For each new bucket, search the corresponding old bucket for the
	 * first entry that hashes to the new bucket, and link the end of
	 * newly formed bucket chain (containing entries added to future
	 * table) to that entry. Since all the entries which will end up in
	 * the new bucket appear in the same old bucket, this constructs an
	 * entirely valid new hash table, but with multiple buckets
	 * "zipped" together into a single imprecise chain.
	 */
	for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
		old_hash = rht_bucket_index(old_tbl, new_hash);
		old_bucket_lock = bucket_lock(old_tbl, old_hash);

		spin_lock_bh(old_bucket_lock);
		rht_for_each(he, old_tbl, old_hash) {
			if (head_hashfn(ht, new_tbl, he) == new_hash) {
				link_old_to_new(new_tbl, new_hash, he);
				break;
			}
		}
		spin_unlock_bh(old_bucket_lock);
	}

	/* Publish the new table pointer. Lookups may now traverse
	 * the new table, but they will not benefit from any
	 * additional efficiency until later steps unzip the buckets.
	 */
	rcu_assign_pointer(ht->tbl, new_tbl);

	/* Unzip interleaved hash chains */
	while (!complete && !ht->being_destroyed) {
		/* Wait for readers. All new readers will see the new
		 * table, and thus no references to the old table will
		 * remain.
		 */
		synchronize_rcu();

		/* For each bucket in the old table (each of which
		 * contains items from multiple buckets of the new
		 * table): ...
		 */
		complete = true;
		for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
			struct rhash_head *head;

			old_bucket_lock = bucket_lock(old_tbl, old_hash);
			spin_lock_bh(old_bucket_lock);

			hashtable_chain_unzip(ht, new_tbl, old_tbl, old_hash);
			head = rht_dereference_bucket(old_tbl->buckets[old_hash],
						      old_tbl, old_hash);
			if (!rht_is_a_nulls(head))
				complete = false;

			spin_unlock_bh(old_bucket_lock);
		}
	}

	bucket_table_free(old_tbl);
	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_expand);
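
/* Example (a minimal sketch, as exercised by the self test below):
 * explicit expansion must be serialized against other resizes by
 * holding ht->mutex:
 *
 *	mutex_lock(&ht->mutex);
 *	rhashtable_expand(ht);
 *	mutex_unlock(&ht->mutex);
 */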

/**
 * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
 * @ht:		the hash table to shrink
 *
 * This function may only be called in a context where it is safe to call
 * synchronize_rcu(), e.g. not within an rcu_read_lock() section.
 *
 * The caller must ensure that no concurrent resizing occurs by holding
 * ht->mutex.
 *
 * It is valid to have concurrent insertions and deletions protected by per
 * bucket locks or concurrent RCU protected lookups and traversals.
 */
int rhashtable_shrink(struct rhashtable *ht)
{
	struct bucket_table *new_tbl, *tbl = rht_dereference(ht->tbl, ht);
	spinlock_t *new_bucket_lock, *old_bucket_lock1, *old_bucket_lock2;
	unsigned int new_hash;

	ASSERT_RHT_MUTEX(ht);

	new_tbl = bucket_table_alloc(ht, tbl->size / 2);
	if (new_tbl == NULL)
		return -ENOMEM;

	rcu_assign_pointer(ht->future_tbl, new_tbl);
	synchronize_rcu();

	/* Link the first entry in the old bucket to the end of the
	 * bucket in the new table. As entries are concurrently being
	 * added to the new table, lock down the new bucket. As we
	 * always divide the size in half when shrinking, each bucket
	 * in the new table maps to exactly two buckets in the old
	 * table.
	 *
	 * As removals can occur concurrently on the old table, we need
	 * to lock down both matching buckets in the old table.
	 */
	for (new_hash = 0; new_hash < new_tbl->size; new_hash++) {
		old_bucket_lock1 = bucket_lock(tbl, new_hash);
		old_bucket_lock2 = bucket_lock(tbl, new_hash + new_tbl->size);
		new_bucket_lock = bucket_lock(new_tbl, new_hash);

		spin_lock_bh(old_bucket_lock1);
		spin_lock_bh_nested(old_bucket_lock2, RHT_LOCK_NESTED);
		spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED2);

		rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
				   tbl->buckets[new_hash]);
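		/* bucket_tail() is re-evaluated on purpose: the first
		 * assignment above extends the new chain, so its tail
		 * has to be located again before appending the second
		 * old bucket.
		 */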
		rcu_assign_pointer(*bucket_tail(new_tbl, new_hash),
				   tbl->buckets[new_hash + new_tbl->size]);

		spin_unlock_bh(new_bucket_lock);
		spin_unlock_bh(old_bucket_lock2);
		spin_unlock_bh(old_bucket_lock1);
	}

	/* Publish the new, valid hash table */
	rcu_assign_pointer(ht->tbl, new_tbl);
	atomic_dec(&ht->shift);

	/* Wait for readers. No new readers will have references to the
	 * old hash table.
	 */
	synchronize_rcu();

	bucket_table_free(tbl);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_shrink);

static void rht_deferred_worker(struct work_struct *work)
{
	struct rhashtable *ht;
	struct bucket_table *tbl;

	ht = container_of(work, struct rhashtable, run_work.work);
	mutex_lock(&ht->mutex);
	tbl = rht_dereference(ht->tbl, ht);

	if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
		rhashtable_expand(ht);
	else if (ht->p.shrink_decision && ht->p.shrink_decision(ht, tbl->size))
		rhashtable_shrink(ht);

	mutex_unlock(&ht->mutex);
}

static void rhashtable_wakeup_worker(struct rhashtable *ht)
{
	struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
	struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
	size_t size = tbl->size;

	/* Only adjust the table if no resizing is currently in progress. */
	if (tbl == new_tbl &&
	    ((ht->p.grow_decision && ht->p.grow_decision(ht, size)) ||
	     (ht->p.shrink_decision && ht->p.shrink_decision(ht, size))))
		schedule_delayed_work(&ht->run_work, 0);
}

static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
				struct bucket_table *tbl, u32 hash)
{
	struct rhash_head *head = rht_dereference_bucket(tbl->buckets[hash],
							 tbl, hash);

	if (rht_is_a_nulls(head))
		INIT_RHT_NULLS_HEAD(obj->next, ht, hash);
	else
		RCU_INIT_POINTER(obj->next, head);

	rcu_assign_pointer(tbl->buckets[hash], obj);

	atomic_inc(&ht->nelems);

	rhashtable_wakeup_worker(ht);
}

/**
 * rhashtable_insert - insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Will take a per bucket spinlock to protect against concurrent mutations
 * on the same bucket. Multiple insertions may occur in parallel unless
 * they map to the same bucket lock.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj)
{
	struct bucket_table *tbl;
	spinlock_t *lock;
	unsigned hash;

	rcu_read_lock();

	tbl = rht_dereference_rcu(ht->future_tbl, ht);
	hash = head_hashfn(ht, tbl, obj);
	lock = bucket_lock(tbl, hash);

	spin_lock_bh(lock);
	__rhashtable_insert(ht, obj, tbl, hash);
	spin_unlock_bh(lock);

	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rhashtable_insert);
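
/* Example (an illustrative sketch, using the test_obj type from the self
 * test below): the caller embeds a struct rhash_head in its object and
 * passes that in:
 *
 *	struct test_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *
 *	if (obj) {
 *		obj->value = 42;
 *		rhashtable_insert(ht, &obj->node);
 *	}
 */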

/**
 * rhashtable_remove - remove object from hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Since the hash chain is singly linked, the removal operation needs to
 * walk the bucket chain upon removal. The removal operation is thus
 * considerably slower if the hash table is not correctly sized.
 *
 * Will automatically shrink the table via rhashtable_shrink() if the
 * shrink_decision function specified at rhashtable_init() returns true.
 *
 * Will take the per bucket spinlock to protect against concurrent
 * mutations on the same bucket. It is valid to have concurrent lookups
 * if they are RCU protected.
 */
bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj)
{
	struct bucket_table *tbl;
	struct rhash_head __rcu **pprev;
	struct rhash_head *he;
	spinlock_t *lock;
	unsigned int hash;

	rcu_read_lock();
	tbl = rht_dereference_rcu(ht->tbl, ht);
	hash = head_hashfn(ht, tbl, obj);

	lock = bucket_lock(tbl, hash);
	spin_lock_bh(lock);

restart:
	pprev = &tbl->buckets[hash];
	rht_for_each(he, tbl, hash) {
		if (he != obj) {
			pprev = &he->next;
			continue;
		}

		rcu_assign_pointer(*pprev, obj->next);
		atomic_dec(&ht->nelems);

		spin_unlock_bh(lock);

		rhashtable_wakeup_worker(ht);

		rcu_read_unlock();

		return true;
	}

	if (tbl != rht_dereference_rcu(ht->future_tbl, ht)) {
		spin_unlock_bh(lock);

		tbl = rht_dereference_rcu(ht->future_tbl, ht);
		hash = head_hashfn(ht, tbl, obj);

		lock = bucket_lock(tbl, hash);
		spin_lock_bh(lock);
		goto restart;
	}

	spin_unlock_bh(lock);
	rcu_read_unlock();

	return false;
}
EXPORT_SYMBOL_GPL(rhashtable_remove);
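
/* Example (an illustrative sketch, following the self test below): a
 * removal usually follows a lookup of the embedding object. Note that
 * the self test frees the object right after removal; users with
 * concurrent RCU readers would typically defer the free, e.g. with
 * kfree_rcu():
 *
 *	struct test_obj *obj = rhashtable_lookup(ht, &key);
 *
 *	if (obj)
 *		rhashtable_remove(ht, &obj->node);
 */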

struct rhashtable_compare_arg {
	struct rhashtable *ht;
	const void *key;
};

static bool rhashtable_compare(void *ptr, void *arg)
{
	struct rhashtable_compare_arg *x = arg;
	struct rhashtable *ht = x->ht;

	return !memcmp(ptr + ht->p.key_offset, x->key, ht->p.key_len);
}

/**
 * rhashtable_lookup - lookup key in hash table
 * @ht:		hash table
 * @key:	pointer to key
 *
 * Computes the hash value for the key and traverses the bucket chain looking
 * for an entry with an identical key. The first matching entry is returned.
 *
 * This lookup function may only be used for fixed key hash tables (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 */
void *rhashtable_lookup(struct rhashtable *ht, const void *key)
{
	struct rhashtable_compare_arg arg = {
		.ht = ht,
		.key = key,
	};

	BUG_ON(!ht->p.key_len);

	return rhashtable_lookup_compare(ht, key, &rhashtable_compare, &arg);
}
EXPORT_SYMBOL_GPL(rhashtable_lookup);
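
/* Example (an illustrative sketch, keyed like the self test below): the
 * returned object must only be dereferenced under rcu_read_lock():
 *
 *	u32 key = 42;
 *	struct test_obj *obj;
 *
 *	rcu_read_lock();
 *	obj = rhashtable_lookup(ht, &key);
 *	if (obj)
 *		pr_info("found value %d\n", obj->value);
 *	rcu_read_unlock();
 */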

/**
 * rhashtable_lookup_compare - search hash table with compare function
 * @ht:		hash table
 * @key:	the pointer to the key
 * @compare:	compare function, must return true on match
 * @arg:	argument passed on to compare function
 *
 * Traverses the bucket chain behind the provided hash value and calls the
 * specified compare function for each entry.
 *
 * Lookups may occur in parallel with hashtable mutations and resizing.
 *
 * Returns the first entry on which the compare function returned true.
 */
void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
				bool (*compare)(void *, void *), void *arg)
{
	const struct bucket_table *tbl, *old_tbl;
	struct rhash_head *he;
	u32 hash;

	rcu_read_lock();

	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	tbl = rht_dereference_rcu(ht->future_tbl, ht);
	hash = key_hashfn(ht, key, ht->p.key_len);
restart:
	rht_for_each_rcu(he, tbl, rht_bucket_index(tbl, hash)) {
		if (!compare(rht_obj(ht, he), arg))
			continue;
		rcu_read_unlock();
		return rht_obj(ht, he);
	}

	if (unlikely(tbl != old_tbl)) {
		tbl = old_tbl;
		goto restart;
	}
	rcu_read_unlock();

	return NULL;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
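
/* Example (a hypothetical sketch): a custom compare function makes it
 * possible to match on more than raw key memory. The key still selects
 * the bucket chain via the configured hashfn:
 *
 *	static bool my_compare(void *ptr, void *arg)
 *	{
 *		const struct test_obj *obj = ptr;
 *		const int *key = arg;
 *
 *		return obj->value == *key;
 *	}
 *
 *	obj = rhashtable_lookup_compare(ht, &key, my_compare, &key);
 */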

/**
 * rhashtable_lookup_insert - lookup and insert object into hash table
 * @ht:		hash table
 * @obj:	pointer to hash head inside object
 *
 * Locks down the bucket chain in both the old and new table if a resize
 * is in progress to ensure that writers can't remove from the old table
 * and can't insert to the new table during the atomic operation of search
 * and insertion. Searches for duplicates in both the old and new table if
 * a resize is in progress.
 *
 * This lookup function may only be used for fixed key hash tables (key_len
 * parameter set). It will BUG() if used inappropriately.
 *
 * It is safe to call this function from atomic context.
 *
 * Will trigger an automatic deferred table resizing if the size grows
 * beyond the watermark indicated by grow_decision() which can be passed
 * to rhashtable_init().
 */
bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj)
{
	struct bucket_table *new_tbl, *old_tbl;
	spinlock_t *new_bucket_lock, *old_bucket_lock;
	u32 new_hash, old_hash;
	bool success = true;

	BUG_ON(!ht->p.key_len);

	rcu_read_lock();

	old_tbl = rht_dereference_rcu(ht->tbl, ht);
	old_hash = head_hashfn(ht, old_tbl, obj);
	old_bucket_lock = bucket_lock(old_tbl, old_hash);
	spin_lock_bh(old_bucket_lock);

	new_tbl = rht_dereference_rcu(ht->future_tbl, ht);
	new_hash = head_hashfn(ht, new_tbl, obj);
	new_bucket_lock = bucket_lock(new_tbl, new_hash);
	if (unlikely(old_tbl != new_tbl))
		spin_lock_bh_nested(new_bucket_lock, RHT_LOCK_NESTED);

	if (rhashtable_lookup(ht, rht_obj(ht, obj) + ht->p.key_offset)) {
		success = false;
		goto exit;
	}

	__rhashtable_insert(ht, obj, new_tbl, new_hash);

exit:
	if (unlikely(old_tbl != new_tbl))
		spin_unlock_bh(new_bucket_lock);
	spin_unlock_bh(old_bucket_lock);

	rcu_read_unlock();

	return success;
}
EXPORT_SYMBOL_GPL(rhashtable_lookup_insert);
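
/* Example (an illustrative sketch): unlike rhashtable_insert(), this
 * variant refuses duplicate keys, so the return value must be checked;
 * on failure the object was not linked and may be freed immediately:
 *
 *	if (!rhashtable_lookup_insert(ht, &obj->node)) {
 *		kfree(obj);
 *		return -EEXIST;
 *	}
 */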

static size_t rounded_hashtable_size(struct rhashtable_params *params)
{
	return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
		   1UL << params->min_shift);
}

/**
 * rhashtable_init - initialize a new hash table
 * @ht:		hash table to be initialized
 * @params:	configuration parameters
 *
 * Initializes a new hash table based on the provided configuration
 * parameters. A table can be configured either with a variable or
 * fixed length key:
 *
 * Configuration Example 1: Fixed length keys
 * struct test_obj {
 *	int			key;
 *	void *			my_member;
 *	struct rhash_head	node;
 * };
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.key_offset = offsetof(struct test_obj, key),
 *	.key_len = sizeof(int),
 *	.hashfn = jhash,
 *	.nulls_base = (1U << RHT_BASE_SHIFT),
 * };
 *
 * Configuration Example 2: Variable length keys
 * struct test_obj {
 *	[...]
 *	struct rhash_head	node;
 * };
 *
 * u32 my_hash_fn(const void *data, u32 seed)
 * {
 *	struct test_obj *obj = data;
 *
 *	return [... hash ...];
 * }
 *
 * struct rhashtable_params params = {
 *	.head_offset = offsetof(struct test_obj, node),
 *	.hashfn = jhash,
 *	.obj_hashfn = my_hash_fn,
 * };
 */
int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
{
	struct bucket_table *tbl;
	size_t size;

	size = HASH_DEFAULT_SIZE;

	if ((params->key_len && !params->hashfn) ||
	    (!params->key_len && !params->obj_hashfn))
		return -EINVAL;

	if (params->nulls_base && params->nulls_base < (1U << RHT_BASE_SHIFT))
		return -EINVAL;

	params->min_shift = max_t(size_t, params->min_shift,
				  ilog2(HASH_MIN_SIZE));

	if (params->nelem_hint)
		size = rounded_hashtable_size(params);

	memset(ht, 0, sizeof(*ht));
	mutex_init(&ht->mutex);
	memcpy(&ht->p, params, sizeof(*params));

	if (params->locks_mul)
		ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
	else
		ht->p.locks_mul = BUCKET_LOCKS_PER_CPU;

	tbl = bucket_table_alloc(ht, size);
	if (tbl == NULL)
		return -ENOMEM;

	atomic_set(&ht->shift, ilog2(tbl->size));
	RCU_INIT_POINTER(ht->tbl, tbl);
	RCU_INIT_POINTER(ht->future_tbl, tbl);

	if (!ht->p.hash_rnd)
		get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));

	if (ht->p.grow_decision || ht->p.shrink_decision)
		INIT_DEFERRABLE_WORK(&ht->run_work, rht_deferred_worker);

	return 0;
}
EXPORT_SYMBOL_GPL(rhashtable_init);
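
/* Example (a minimal sketch): bringing a table up and tearing it down
 * again, using one of the parameter blocks shown above:
 *
 *	struct rhashtable ht;
 *	int err;
 *
 *	err = rhashtable_init(&ht, &params);
 *	if (err)
 *		return err;
 *	...
 *	rhashtable_destroy(&ht);
 */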

/**
 * rhashtable_destroy - destroy hash table
 * @ht:		the hash table to destroy
 *
 * Frees the bucket array. This function is not RCU safe, therefore the caller
 * has to make sure that no resizing may happen by unpublishing the hashtable
 * and waiting for the quiescent cycle before releasing the bucket array.
 */
void rhashtable_destroy(struct rhashtable *ht)
{
	ht->being_destroyed = true;

	mutex_lock(&ht->mutex);

	cancel_delayed_work(&ht->run_work);
	bucket_table_free(rht_dereference(ht->tbl, ht));

	mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_destroy);

/**************************************************************************
 * Self Test
 **************************************************************************/

#ifdef CONFIG_TEST_RHASHTABLE

#define TEST_HT_SIZE	8
#define TEST_ENTRIES	2048
#define TEST_PTR	((void *) 0xdeadbeef)
#define TEST_NEXPANDS	4

struct test_obj {
	void			*ptr;
	int			value;
	struct rhash_head	node;
};

static int __init test_rht_lookup(struct rhashtable *ht)
{
	unsigned int i;

	for (i = 0; i < TEST_ENTRIES * 2; i++) {
		struct test_obj *obj;
		bool expected = !(i % 2);
		u32 key = i;

		obj = rhashtable_lookup(ht, &key);

		if (expected && !obj) {
			pr_warn("Test failed: Could not find key %u\n", key);
			return -ENOENT;
		} else if (!expected && obj) {
			pr_warn("Test failed: Unexpected entry found for key %u\n",
				key);
			return -EEXIST;
		} else if (expected && obj) {
			if (obj->ptr != TEST_PTR || obj->value != i) {
				pr_warn("Test failed: Lookup value mismatch %p!=%p, %u!=%u\n",
					obj->ptr, TEST_PTR, obj->value, i);
				return -EINVAL;
			}
		}
	}

	return 0;
}

static void test_bucket_stats(struct rhashtable *ht, bool quiet)
{
	unsigned int cnt, rcu_cnt, i, total = 0;
	struct rhash_head *pos;
	struct test_obj *obj;
	struct bucket_table *tbl;

	tbl = rht_dereference_rcu(ht->tbl, ht);
	for (i = 0; i < tbl->size; i++) {
		rcu_cnt = cnt = 0;

		if (!quiet)
			pr_info(" [%#4x/%zu]", i, tbl->size);

		rht_for_each_entry_rcu(obj, pos, tbl, i, node) {
			cnt++;
			total++;
			if (!quiet)
				pr_cont(" [%p],", obj);
		}

		rht_for_each_entry_rcu(obj, pos, tbl, i, node)
			rcu_cnt++;

		if (rcu_cnt != cnt)
			pr_warn("Test failed: Chain count mismach %d != %d",
				cnt, rcu_cnt);

		if (!quiet)
			pr_cont("\n  [%#x] first element: %p, chain length: %u\n",
				i, tbl->buckets[i], cnt);
	}

	pr_info("  Traversal complete: counted=%u, nelems=%u, entries=%d\n",
		total, atomic_read(&ht->nelems), TEST_ENTRIES);

	if (total != atomic_read(&ht->nelems) || total != TEST_ENTRIES)
		pr_warn("Test failed: Total count mismatch ^^^");
}

static int __init test_rhashtable(struct rhashtable *ht)
{
	struct bucket_table *tbl;
	struct test_obj *obj;
	struct rhash_head *pos, *next;
	int err;
	unsigned int i;

	/*
	 * Insertion Test:
	 * Insert TEST_ENTRIES into table with all keys even numbers
	 */
	pr_info("  Adding %d keys\n", TEST_ENTRIES);
	for (i = 0; i < TEST_ENTRIES; i++) {
		struct test_obj *obj;

		obj = kzalloc(sizeof(*obj), GFP_KERNEL);
		if (!obj) {
			err = -ENOMEM;
			goto error;
		}

		obj->ptr = TEST_PTR;
		obj->value = i * 2;

		rhashtable_insert(ht, &obj->node);
	}

	rcu_read_lock();
	test_bucket_stats(ht, true);
	test_rht_lookup(ht);
	rcu_read_unlock();

	for (i = 0; i < TEST_NEXPANDS; i++) {
		pr_info("  Table expansion iteration %u...\n", i);
		mutex_lock(&ht->mutex);
		rhashtable_expand(ht);
		mutex_unlock(&ht->mutex);

		rcu_read_lock();
		pr_info("  Verifying lookups...\n");
		test_rht_lookup(ht);
		rcu_read_unlock();
	}

	for (i = 0; i < TEST_NEXPANDS; i++) {
		pr_info("  Table shrinkage iteration %u...\n", i);
		mutex_lock(&ht->mutex);
		rhashtable_shrink(ht);
		mutex_unlock(&ht->mutex);

		rcu_read_lock();
		pr_info("  Verifying lookups...\n");
		test_rht_lookup(ht);
		rcu_read_unlock();
	}

	rcu_read_lock();
	test_bucket_stats(ht, true);
	rcu_read_unlock();

	pr_info("  Deleting %d keys\n", TEST_ENTRIES);
	for (i = 0; i < TEST_ENTRIES; i++) {
		u32 key = i * 2;

		obj = rhashtable_lookup(ht, &key);
		BUG_ON(!obj);

		rhashtable_remove(ht, &obj->node);
		kfree(obj);
	}

	return 0;

error:
	tbl = rht_dereference_rcu(ht->tbl, ht);
	for (i = 0; i < tbl->size; i++)
		rht_for_each_entry_safe(obj, pos, next, tbl, i, node)
			kfree(obj);

	return err;
}

static int __init test_rht_init(void)
{
	struct rhashtable ht;
	struct rhashtable_params params = {
		.nelem_hint = TEST_HT_SIZE,
		.head_offset = offsetof(struct test_obj, node),
		.key_offset = offsetof(struct test_obj, value),
		.key_len = sizeof(int),
		.hashfn = jhash,
		.nulls_base = (3U << RHT_BASE_SHIFT),
		.grow_decision = rht_grow_above_75,
		.shrink_decision = rht_shrink_below_30,
	};
	int err;

	pr_info("Running resizable hashtable tests...\n");

	err = rhashtable_init(&ht, &params);
	if (err < 0) {
		pr_warn("Test failed: Unable to initialize hashtable: %d\n",
			err);
		return err;
	}

	err = test_rhashtable(&ht);

	rhashtable_destroy(&ht);

	return err;
}

subsys_initcall(test_rht_init);

#endif /* CONFIG_TEST_RHASHTABLE */