/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "wme.h"
#include "ieee80211_i.h"
#include "mesh.h"

#ifdef CONFIG_MAC80211_VERBOSE_MPATH_DEBUG
#define mpath_dbg(fmt, args...)	printk(KERN_DEBUG fmt, ##args)
#else
#define mpath_dbg(fmt, args...)	do { (void)(0); } while (0)
#endif

/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER	2

/* Keep the mean chain length below this constant */
#define MEAN_CHAIN_LEN		2

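/* A path is considered expired when it is marked active, its expiry time has
 * passed and it has not been fixed by mesh_path_fix_nexthop(). */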
#define MPATH_EXPIRED(mpath) ((mpath->flags & MESH_PATH_ACTIVE) && \
				time_after(jiffies, mpath->exp_time) && \
				!(mpath->flags & MESH_PATH_FIXED))

struct mpath_node {
	struct hlist_node list;
	struct rcu_head rcu;
	/* This indirection allows two different tables to point to the same
	 * mesh_path structure, useful when resizing
	 */
	struct mesh_path *mpath;
};

static struct mesh_table __rcu *mesh_paths;
static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */

int mesh_paths_generation;

/* This lock will have the grow table function as writer and add / delete nodes
 * as readers. RCU provides sufficient protection only when reading the table
 * (i.e. doing lookups).  Adding or removing nodes requires we take the read
 * lock or we risk operating on an old table.  The write lock is only needed
 * when modifying the number of buckets of a table.
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);


static inline struct mesh_table *resize_dereference_mesh_paths(void)
{
	return rcu_dereference_protected(mesh_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}

static inline struct mesh_table *resize_dereference_mpp_paths(void)
{
	return rcu_dereference_protected(mpp_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}

/*
 * CAREFUL -- "tbl" must not be an expression,
 * in particular not an rcu_dereference(), since
 * it's used twice. So it is illegal to do
 *	for_each_mesh_entry(rcu_dereference(...), ...)
 */
#define for_each_mesh_entry(tbl, p, node, i) \
	for (i = 0; i <= tbl->hash_mask; i++) \
		hlist_for_each_entry_rcu(node, p, &tbl->hash_buckets[i], list)


static struct mesh_table *mesh_table_alloc(int size_order)
{
	int i;
	struct mesh_table *newtbl;

	newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
	if (!newtbl)
		return NULL;

	newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
			(1 << size_order), GFP_ATOMIC);

	if (!newtbl->hash_buckets) {
		kfree(newtbl);
		return NULL;
	}

	newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
			(1 << size_order), GFP_ATOMIC);
	if (!newtbl->hashwlock) {
		kfree(newtbl->hash_buckets);
		kfree(newtbl);
		return NULL;
	}

	newtbl->size_order = size_order;
	newtbl->hash_mask = (1 << size_order) - 1;
	atomic_set(&newtbl->entries,  0);
	get_random_bytes(&newtbl->hash_rnd,
			sizeof(newtbl->hash_rnd));
	for (i = 0; i <= newtbl->hash_mask; i++)
		spin_lock_init(&newtbl->hashwlock[i]);
	spin_lock_init(&newtbl->gates_lock);

	return newtbl;
}

static void __mesh_table_free(struct mesh_table *tbl)
{
	kfree(tbl->hash_buckets);
	kfree(tbl->hashwlock);
	kfree(tbl);
}

static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
	struct hlist_head *mesh_hash;
	struct hlist_node *p, *q;
	struct mpath_node *gate;
	int i;

	mesh_hash = tbl->hash_buckets;
	for (i = 0; i <= tbl->hash_mask; i++) {
		spin_lock_bh(&tbl->hashwlock[i]);
		hlist_for_each_safe(p, q, &mesh_hash[i]) {
			tbl->free_node(p, free_leafs);
			atomic_dec(&tbl->entries);
		}
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
	if (free_leafs) {
		spin_lock_bh(&tbl->gates_lock);
		hlist_for_each_entry_safe(gate, p, q,
					 tbl->known_gates, list) {
			hlist_del(&gate->list);
			kfree(gate);
		}
		kfree(tbl->known_gates);
		spin_unlock_bh(&tbl->gates_lock);
	}

	__mesh_table_free(tbl);
}

static int mesh_table_grow(struct mesh_table *oldtbl,
			   struct mesh_table *newtbl)
{
	struct hlist_head *oldhash;
	struct hlist_node *p, *q;
	int i;

	if (atomic_read(&oldtbl->entries)
			< oldtbl->mean_chain_len * (oldtbl->hash_mask + 1))
		return -EAGAIN;

	newtbl->free_node = oldtbl->free_node;
	newtbl->mean_chain_len = oldtbl->mean_chain_len;
	newtbl->copy_node = oldtbl->copy_node;
	newtbl->known_gates = oldtbl->known_gates;
	atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));

	oldhash = oldtbl->hash_buckets;
	for (i = 0; i <= oldtbl->hash_mask; i++)
		hlist_for_each(p, &oldhash[i])
			if (oldtbl->copy_node(p, newtbl) < 0)
				goto errcopy;

	return 0;

errcopy:
	for (i = 0; i <= newtbl->hash_mask; i++) {
		hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
			oldtbl->free_node(p, 0);
	}
	return -ENOMEM;
}

static u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata,
			   struct mesh_table *tbl)
{
	/* Use last four bytes of hw addr and interface index as hash index */
	return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd)
		& tbl->hash_mask;
}

/**
 *
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct sk_buff_head tmpq;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	__skb_queue_head_init(&tmpq);

	spin_lock_irqsave(&mpath->frame_queue.lock, flags);

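	/* Re-address any frames queued while the path was unresolved so that
	 * they are transmitted to the newly assigned next hop. */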
	while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
		__skb_queue_tail(&tmpq, skb);
	}

	skb_queue_splice(&tmpq, &mpath->frame_queue);
	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}

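/* Re-address @skb so it can be forwarded via a mesh gate: if the frame does
 * not yet carry an Address Extension field, one is added and the original
 * addr3/addr4 are preserved as eaddr1/eaddr2; the frame is then pointed at
 * the gate (addr3) and the gate's current next hop (addr1). */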
static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
			     struct mesh_path *gate_mpath)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211s_hdr *mshdr;
	int mesh_hdrlen, hdrlen;
	char *next_hop;

	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

	if (!(mshdr->flags & MESH_FLAGS_AE)) {
		/* size of the fixed part of the mesh header */
		mesh_hdrlen = 6;

		/* make room for the two extended addresses */
		skb_push(skb, 2 * ETH_ALEN);
		memmove(skb->data, hdr, hdrlen + mesh_hdrlen);

		hdr = (struct ieee80211_hdr *) skb->data;

		/* we preserve the previous mesh header and only add
		 * the new addresses */
		mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
		mshdr->flags = MESH_FLAGS_AE_A5_A6;
		memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
		memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
	}

	/* update next hop */
	hdr = (struct ieee80211_hdr *) skb->data;
	rcu_read_lock();
	next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
	memcpy(hdr->addr1, next_hop, ETH_ALEN);
	rcu_read_unlock();
	memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
	memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}

/**
 *
 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
 *
 * This function is used to transfer or copy frames from an unresolved mpath to
 * a gate mpath.  The function also adds the Address Extension field and
 * updates the next hop.
 *
 * If a frame already has an Address Extension field, only the next hop and
 * destination addresses are updated.
 *
 * The gate mpath must be an active mpath with a valid mpath->next_hop.
 *
 * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
 * @from_mpath: The failed mpath
 * @copy: When true, copy all the frames to the new mpath queue.  When false,
 * move them.
 */
static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
				    struct mesh_path *from_mpath,
				    bool copy)
{
	struct sk_buff *skb, *cp_skb = NULL;
	struct sk_buff_head gateq, failq;
	unsigned long flags;
	int num_skbs;

	BUG_ON(gate_mpath == from_mpath);
	BUG_ON(!gate_mpath->next_hop);

	__skb_queue_head_init(&gateq);
	__skb_queue_head_init(&failq);

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice_init(&from_mpath->frame_queue, &failq);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);

	num_skbs = skb_queue_len(&failq);

	while (num_skbs--) {
		skb = __skb_dequeue(&failq);
		if (copy) {
			cp_skb = skb_copy(skb, GFP_ATOMIC);
			if (cp_skb)
				__skb_queue_tail(&failq, cp_skb);
		}

		prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
		__skb_queue_tail(&gateq, skb);
	}

	spin_lock_irqsave(&gate_mpath->frame_queue.lock, flags);
	skb_queue_splice(&gateq, &gate_mpath->frame_queue);
	mpath_dbg("Mpath queue for gate %pM has %d frames\n",
			gate_mpath->dst,
			skb_queue_len(&gate_mpath->frame_queue));
	spin_unlock_irqrestore(&gate_mpath->frame_queue.lock, flags);

	if (!copy)
		return;

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice(&failq, &from_mpath->frame_queue);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}

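/* Table lookup helper keyed on destination address and sdata; must be called
 * within an rcu_read_lock() section. */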
static struct mesh_path *path_lookup(struct mesh_table *tbl, u8 *dst,
					  struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct hlist_node *n;
	struct hlist_head *bucket;
	struct mpath_node *node;

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
	hlist_for_each_entry_rcu(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
				memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
			if (MPATH_EXPIRED(mpath)) {
				spin_lock_bh(&mpath->state_lock);
				mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}

/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @dst: hardware address (ETH_ALEN length) of destination
 * @sdata: local subif
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	return path_lookup(rcu_dereference(mesh_paths), dst, sdata);
}

struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	return path_lookup(rcu_dereference(mpp_paths), dst, sdata);
}


/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl = rcu_dereference(mesh_paths);
	struct mpath_node *node;
	struct hlist_node *p;
	int i;
	int j = 0;

	for_each_mesh_entry(tbl, p, node, i) {
		if (sdata && node->mpath->sdata != sdata)
			continue;
		if (j++ == idx) {
			if (MPATH_EXPIRED(node->mpath)) {
				spin_lock_bh(&node->mpath->state_lock);
				node->mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&node->mpath->state_lock);
			}
			return node->mpath;
		}
	}

	return NULL;
}

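/* RCU callback that frees a gate node once all readers are done with it */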
static void mesh_gate_node_reclaim(struct rcu_head *rp)
{
	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
	kfree(node);
}

/**
 * mesh_path_add_gate - add the given mpath to the list of known mesh gates
 * @mpath: gate path to add to table
 */
int mesh_path_add_gate(struct mesh_path *mpath)
{
	struct mesh_table *tbl;
	struct mpath_node *gate, *new_gate;
	struct hlist_node *n;
	int err;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);

	hlist_for_each_entry_rcu(gate, n, tbl->known_gates, list)
		if (gate->mpath == mpath) {
			err = -EEXIST;
			goto err_rcu;
		}

	new_gate = kzalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_gate) {
		err = -ENOMEM;
		goto err_rcu;
	}

	mpath->is_gate = true;
	mpath->sdata->u.mesh.num_gates++;
	new_gate->mpath = mpath;
	spin_lock_bh(&tbl->gates_lock);
	hlist_add_head_rcu(&new_gate->list, tbl->known_gates);
	spin_unlock_bh(&tbl->gates_lock);
	rcu_read_unlock();
	mpath_dbg("Mesh path (%s): Recorded new gate: %pM. %d known gates\n",
		  mpath->sdata->name, mpath->dst,
		  mpath->sdata->u.mesh.num_gates);
	return 0;
err_rcu:
	rcu_read_unlock();
	return err;
}

/**
 * mesh_gate_del - remove a mesh gate from the list of known gates
 * @tbl: table which holds our list of known gates
 * @mpath: gate mpath
 *
 * Returns: 0 on success
 *
 * Locking: must be called inside rcu_read_lock() section
 */
static int mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	struct mpath_node *gate;
	struct hlist_node *p, *q;

	hlist_for_each_entry_safe(gate, p, q, tbl->known_gates, list)
		if (gate->mpath == mpath) {
			spin_lock_bh(&tbl->gates_lock);
			hlist_del_rcu(&gate->list);
			call_rcu(&gate->rcu, mesh_gate_node_reclaim);
			spin_unlock_bh(&tbl->gates_lock);
			mpath->sdata->u.mesh.num_gates--;
			mpath->is_gate = false;
			mpath_dbg("Mesh path (%s): Deleted gate: %pM. "
				  "%d known gates\n", mpath->sdata->name,
				  mpath->dst, mpath->sdata->u.mesh.num_gates);
			break;
		}

	return 0;
}

/**
 * mesh_gate_num - number of gates known to this interface
 * @sdata: subif data
 */
int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
{
	return sdata->u.mesh.num_gates;
}

/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @dst: destination address of the path (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 on success
 *
 * State: the initial state of the new path is set to 0
 */
int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return -ENOSPC;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->timer.data = (unsigned long) new_mpath;
	new_mpath->timer.function = mesh_path_timer;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	init_timer(&new_mpath->timer);

	tbl = resize_dereference_mesh_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock_bh(&tbl->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
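	/* If this addition pushes the mean chain length over the threshold,
	 * ask the mesh work queue to grow the table; resizing cannot happen
	 * here because pathtbl_resize_lock is only held for reading. */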
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	mesh_paths_generation++;

	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	if (grow) {
		set_bit(MESH_WORK_GROW_MPATH_TABLE,  &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	atomic_dec(&sdata->u.mesh.mpaths);
	return err;
}

static void mesh_table_free_rcu(struct rcu_head *rcu)
{
	struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);

	mesh_table_free(tbl, false);
}

void mesh_mpath_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mesh_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mesh_paths, newtbl);

	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
	write_unlock_bh(&pathtbl_resize_lock);
}

void mesh_mpp_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mpp_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mpp_paths, newtbl);
	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
	write_unlock_bh(&pathtbl_resize_lock);
}

int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	init_timer(&new_mpath->timer);
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);

	tbl = resize_dereference_mpp_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock_bh(&tbl->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	if (grow) {
		set_bit(MESH_WORK_GROW_MPP_TABLE,  &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	return err;
}


/**
 * mesh_plink_broken - deactivates paths and sends perr when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct mesh_table *tbl;
	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	int i;
	__le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_DEST_UNREACHABLE);

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (rcu_dereference(mpath->next_hop) == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			spin_lock_bh(&mpath->state_lock);
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl,
					mpath->dst, cpu_to_le32(mpath->sn),
					reason, bcast, sdata);
		}
	}
	rcu_read_unlock();
}

static void mesh_path_node_reclaim(struct rcu_head *rp)
{
	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;

	del_timer_sync(&node->mpath->timer);
	atomic_dec(&sdata->u.mesh.mpaths);
	kfree(node->mpath);
	kfree(node);
}

/* needs to be called with the corresponding hashwlock taken */
static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
{
	struct mesh_path *mpath;
	mpath = node->mpath;
	spin_lock(&mpath->state_lock);
	mpath->flags |= MESH_PATH_RESOLVING;
	if (mpath->is_gate)
		mesh_gate_del(tbl, mpath);
	hlist_del_rcu(&node->list);
	call_rcu(&node->rcu, mesh_path_node_reclaim);
	spin_unlock(&mpath->state_lock);
	atomic_dec(&tbl->entries);
}

/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta - mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in a rcu read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	rcu_read_lock();
	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (rcu_dereference(mpath->next_hop) == sta) {
			spin_lock_bh(&tbl->hashwlock[i]);
			__mesh_path_del(tbl, node);
			spin_unlock_bh(&tbl->hashwlock[i]);
		}
	}
	read_unlock_bh(&pathtbl_resize_lock);
	rcu_read_unlock();
}

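/* Remove every path that belongs to @sdata from @tbl.  Callers hold
 * rcu_read_lock() and the pathtbl resize lock (see mesh_path_flush_by_iface). */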
static void table_flush_by_iface(struct mesh_table *tbl,
				 struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	WARN_ON(!rcu_read_lock_held());
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (mpath->sdata != sdata)
			continue;
		spin_lock_bh(&tbl->hashwlock[i]);
		__mesh_path_del(tbl, node);
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
}

/**
 * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
 *
 * This function deletes both mesh paths as well as mesh portal paths.
 *
 * @sdata - interface data to match
 *
 */
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;

	rcu_read_lock();
	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	table_flush_by_iface(tbl, sdata);
	tbl = resize_dereference_mpp_paths();
	table_flush_by_iface(tbl, sdata);
	read_unlock_bh(&pathtbl_resize_lock);
	rcu_read_unlock();
}

/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful
 */
int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int hash_idx;
	int err = 0;

	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	hash_idx = mesh_table_hash(addr, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock_bh(&tbl->hashwlock[hash_idx]);
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
			__mesh_path_del(tbl, node);
			goto enddel;
		}
	}

	err = -ENXIO;
enddel:
	mesh_paths_generation++;
	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	return err;
}

/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	if (mpath->flags & MESH_PATH_ACTIVE)
		ieee80211_add_pending_skbs(mpath->sdata->local,
				&mpath->frame_queue);
}

/**
 * mesh_path_send_to_gates - sends pending frames to all known mesh gates
 *
 * @mpath: mesh path whose queue will be emptied
 *
 * If there is only one gate, the frames are transferred from the failed mpath
 * queue to that gate's queue.  If there is more than one gate, the frames
 * are copied from each gate to the next.  After frames are copied, the
 * mpath queues are emptied onto the transmission queue.
 */
int mesh_path_send_to_gates(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct hlist_node *n;
	struct mesh_table *tbl;
	struct mesh_path *from_mpath = mpath;
	struct mpath_node *gate = NULL;
	bool copy = false;
	struct hlist_head *known_gates;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	known_gates = tbl->known_gates;
	rcu_read_unlock();

	if (!known_gates)
		return -EHOSTUNREACH;

	hlist_for_each_entry_rcu(gate, n, known_gates, list) {
		if (gate->mpath->sdata != sdata)
			continue;

		if (gate->mpath->flags & MESH_PATH_ACTIVE) {
			mpath_dbg("Forwarding to %pM\n", gate->mpath->dst);
			mesh_path_move_to_queue(gate->mpath, from_mpath, copy);
			from_mpath = gate->mpath;
			copy = true;
		} else {
			mpath_dbg("Not forwarding %p\n", gate->mpath);
			mpath_dbg("flags %x\n", gate->mpath->flags);
		}
	}

	hlist_for_each_entry_rcu(gate, n, known_gates, list)
		if (gate->mpath->sdata == sdata) {
			mpath_dbg("Sending to %pM\n", gate->mpath->dst);
			mesh_path_tx_pending(gate->mpath);
		}

	return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
}

/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @sdata: network subif the frame was to be sent through
 *
 * If the frame was being forwarded from another MP, a PERR frame will be sent
 * to the precursor.  The precursor's address (i.e. the previous hop) was saved
 * in addr1 of the frame-to-be-forwarded, and would only be overwritten once
 * the destination is successfully resolved.
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_discard_frame(struct sk_buff *skb,
			     struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct mesh_path *mpath;
	u32 sn = 0;
	__le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_NOFORWARD);

	if (memcmp(hdr->addr4, sdata->vif.addr, ETH_ALEN) != 0) {
		u8 *ra, *da;

		da = hdr->addr3;
		ra = hdr->addr2;
		rcu_read_lock();
		mpath = mesh_path_lookup(da, sdata);
		if (mpath) {
			spin_lock_bh(&mpath->state_lock);
			sn = ++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
		}
		rcu_read_unlock();
		mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl, da,
				   cpu_to_le32(sn), reason, ra, sdata);
	}

	kfree_skb(skb);
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
		mesh_path_discard_frame(skb, mpath->sdata);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function must be called holding mpath->state_lock
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->sn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags |= MESH_PATH_FIXED;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	mesh_path_tx_pending(mpath);
}

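/* free_node callback for the mesh/mpp path tables */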
static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
	struct mesh_path *mpath;
	struct mpath_node *node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	hlist_del_rcu(p);
	if (free_leafs) {
		del_timer_sync(&mpath->timer);
		kfree(mpath);
	}
	kfree(node);
}

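/* copy_node callback: link an existing mpath into a newly grown table */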
static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
	struct mesh_path *mpath;
	struct mpath_node *node, *new_node;
	u32 hash_idx;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (new_node == NULL)
		return -ENOMEM;

	node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	new_node->mpath = mpath;
	hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
	hlist_add_head(&new_node->list,
			&newtbl->hash_buckets[hash_idx]);
	return 0;
}

int mesh_pathtbl_init(void)
{
	struct mesh_table *tbl_path, *tbl_mpp;
	int ret;

	tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_path)
		return -ENOMEM;
	tbl_path->free_node = &mesh_path_node_free;
	tbl_path->copy_node = &mesh_path_node_copy;
	tbl_path->mean_chain_len = MEAN_CHAIN_LEN;
	tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
	if (!tbl_path->known_gates) {
		ret = -ENOMEM;
		goto free_path;
	}
	INIT_HLIST_HEAD(tbl_path->known_gates);

	tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_mpp) {
		ret = -ENOMEM;
		goto free_path;
	}
	tbl_mpp->free_node = &mesh_path_node_free;
	tbl_mpp->copy_node = &mesh_path_node_copy;
	tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;
	tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
	if (!tbl_mpp->known_gates) {
		ret = -ENOMEM;
		goto free_mpp;
	}
	INIT_HLIST_HEAD(tbl_mpp->known_gates);

	/* Need no locking since this is during init */
	RCU_INIT_POINTER(mesh_paths, tbl_path);
	RCU_INIT_POINTER(mpp_paths, tbl_mpp);

	return 0;

free_mpp:
	mesh_table_free(tbl_mpp, true);
free_path:
	mesh_table_free(tbl_path, true);
	return ret;
}

void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, p, node, i) {
		if (node->mpath->sdata != sdata)
			continue;
		mpath = node->mpath;
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		     time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
			mesh_path_del(mpath->dst, mpath->sdata);
	}
	rcu_read_unlock();
}

void mesh_pathtbl_unregister(void)
{
	/* no need for locking during exit path */
	mesh_table_free(rcu_dereference_protected(mesh_paths, 1), true);
	mesh_table_free(rcu_dereference_protected(mpp_paths, 1), true);
}