/*
 * linux/fs/9p/trans_fd.c
 *
 * Fd transport layer.  Includes deprecated socket layer.
 *
 *  Copyright (C) 2006 by Russ Cox <rsc@swtch.com>
 *  Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
 *  Copyright (C) 2004-2008 by Eric Van Hensbergen <ericvh@gmail.com>
 *  Copyright (C) 1997-2002 by Ron Minnich <rminnich@sarnoff.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2
 *  as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA  02111-1301  USA
 *
 */

#include <linux/in.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/ipv6.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/un.h>
#include <linux/uaccess.h>
#include <linux/inet.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/parser.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include <net/9p/transport.h>

#define P9_PORT 564
#define MAX_SOCK_BUF (64*1024)
#define MAXPOLLWADDR	2

49 50 51 52 53 54 55 56
/**
 * struct p9_fd_opts - per-transport options
 * @rfd: file descriptor for reading (trans=fd)
 * @wfd: file descriptor for writing (trans=fd)
 * @port: port to connect to (trans=tcp)
 *
 */

57 58 59 60 61
struct p9_fd_opts {
	int rfd;
	int wfd;
	u16 port;
};
62

/**
 * struct p9_trans_fd - transport state
 * @rd: reference to file to read from
 * @wr: reference of file to write to
 * @conn: connection state reference
 *
 */

struct p9_trans_fd {
	struct file *rd;
	struct file *wr;
	struct p9_conn *conn;
};

77 78 79 80
/*
  * Option Parsing (code inspired by NFS code)
  *  - a little lazy - parse all fd-transport options
  */
81

82 83
enum {
	/* Options that take integer arguments */
84
	Opt_port, Opt_rfdno, Opt_wfdno, Opt_err,
85
};
86

87
static const match_table_t tokens = {
88 89 90
	{Opt_port, "port=%u"},
	{Opt_rfdno, "rfdno=%u"},
	{Opt_wfdno, "wfdno=%u"},
91
	{Opt_err, NULL},
92
};
93

94 95 96 97 98 99 100
enum {
	Rworksched = 1,		/* read work scheduled or running */
	Rpending = 2,		/* can read */
	Wworksched = 4,		/* write work scheduled or running */
	Wpending = 8,		/* can write */
};

Tejun Heo's avatar
Tejun Heo committed
101 102 103 104
struct p9_poll_wait {
	struct p9_conn *conn;
	wait_queue_t wait;
	wait_queue_head_t *wait_addr;
105 106 107 108 109
};

/**
 * struct p9_conn - fd mux connection state information
 * @mux_list: list link for mux to manage multiple connections (?)
110
 * @client: reference to client instance for this connection
111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127
 * @err: error state
 * @req_list: accounting for requests which have been sent
 * @unsent_req_list: accounting for requests that haven't been sent
 * @rcall: current response &p9_fcall structure
 * @rpos: read position in current frame
 * @rbuf: current read buffer
 * @wpos: write position for current frame
 * @wsize: amount of data to write for current frame
 * @wbuf: current write buffer
 * @poll_wait: array of wait_q's for various worker threads
 * @poll_waddr: ????
 * @pt: poll state
 * @rq: current read work
 * @wq: current write work
 * @wsched: ????
 *
 */
128 129 130

struct p9_conn {
	struct list_head mux_list;
131
	struct p9_client *client;
132 133 134 135 136 137 138 139 140
	int err;
	struct list_head req_list;
	struct list_head unsent_req_list;
	struct p9_fcall *rcall;
	int rpos;
	char *rbuf;
	int wpos;
	int wsize;
	char *wbuf;
Tejun Heo's avatar
Tejun Heo committed
141 142
	struct list_head poll_pending_link;
	struct p9_poll_wait poll_wait[MAXPOLLWADDR];
143 144 145 146 147 148
	poll_table pt;
	struct work_struct rq;
	struct work_struct wq;
	unsigned long wsched;
};

/* protects p9_poll_pending_list */
static DEFINE_SPINLOCK(p9_poll_lock);
/* connections with pending poll events, drained by the poller task */
static LIST_HEAD(p9_poll_pending_list);
static struct workqueue_struct *p9_mux_wq;
static struct task_struct *p9_poll_task;

Tejun Heo's avatar
Tejun Heo committed
154
static void p9_mux_poll_stop(struct p9_conn *m)
155
{
Tejun Heo's avatar
Tejun Heo committed
156 157
	unsigned long flags;
	int i;
158

Tejun Heo's avatar
Tejun Heo committed
159 160
	for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) {
		struct p9_poll_wait *pwait = &m->poll_wait[i];
161

Tejun Heo's avatar
Tejun Heo committed
162 163 164
		if (pwait->wait_addr) {
			remove_wait_queue(pwait->wait_addr, &pwait->wait);
			pwait->wait_addr = NULL;
165 166 167
		}
	}

Tejun Heo's avatar
Tejun Heo committed
168 169 170
	spin_lock_irqsave(&p9_poll_lock, flags);
	list_del_init(&m->poll_pending_link);
	spin_unlock_irqrestore(&p9_poll_lock, flags);
171 172
}

173
static void p9_conn_rpc_cb(struct p9_client *, struct p9_req_t *);
174

175
static void p9_mux_flush_cb(struct p9_client *client, struct p9_req_t *freq)
176
{
177 178
	struct p9_conn *m = client->trans;
	struct p9_req_t *req;
179 180

	P9_DPRINTK(P9_DEBUG_MUX, "mux %p tc %p rc %p err %d oldtag %d\n", m,
181 182
		freq->tc, freq->rc, freq->t_err,
		freq->tc->params.tflush.oldtag);
183

184
	req = p9_tag_lookup(client, freq->tc->params.tflush.oldtag);
185
	if (req) {
186 187 188
		req->status = REQ_STATUS_FLSHD;
		list_del(&req->req_list);
		p9_conn_rpc_cb(client, req);
189 190
	}

191
	p9_free_req(client, freq);
192 193
}

194
static void p9_conn_rpc_cb(struct p9_client *client, struct p9_req_t *req)
195 196 197
{
	P9_DPRINTK(P9_DEBUG_MUX, "req %p\n", req);

198
	if (req->tc->id == P9_TFLUSH) { /* flush callback */
199
		P9_DPRINTK(P9_DEBUG_MUX, "flush req %p\n", req);
200
		p9_mux_flush_cb(client, req);
201 202
	} else {			/* normal wakeup path */
		P9_DPRINTK(P9_DEBUG_MUX, "normal req %p\n", req);
203 204 205
		if (!req->t_err && (req->status == REQ_STATUS_FLSHD ||
				 req->status == REQ_STATUS_FLSH))
			req->t_err = -ERESTARTSYS;
206

207
		wake_up(req->wq);
208 209 210
	}
}

211
/**
212 213 214
 * p9_conn_cancel - cancel all pending requests with error
 * @m: mux data
 * @err: error code
215 216
 *
 */
217

218
void p9_conn_cancel(struct p9_conn *m, int err)
219
{
220
	struct p9_req_t *req, *rtmp;
221
	LIST_HEAD(cancel_list);
222

223 224
	P9_DPRINTK(P9_DEBUG_ERROR, "mux %p err %d\n", m, err);
	m->err = err;
225
	spin_lock(&m->client->lock);
226
	list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
227 228 229
		req->status = REQ_STATUS_ERROR;
		if (!req->t_err)
			req->t_err = err;
230 231 232
		list_move(&req->req_list, &cancel_list);
	}
	list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
233 234 235
		req->status = REQ_STATUS_ERROR;
		if (!req->t_err)
			req->t_err = err;
236
		list_move(&req->req_list, &cancel_list);
237
	}
238
	spin_unlock(&m->client->lock);
239

240 241
	list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
		list_del(&req->req_list);
242
		p9_conn_rpc_cb(m->client, req);
243
	}
244
}
245

246
static void process_request(struct p9_conn *m, struct p9_req_t *req)
247 248 249
{
	int ecode;
	struct p9_str *ename;
250

251 252 253
	if (!req->t_err && req->rc->id == P9_RERROR) {
		ecode = req->rc->params.rerror.errno;
		ename = &req->rc->params.rerror.error;
254 255 256 257 258

		P9_DPRINTK(P9_DEBUG_MUX, "Rerror %.*s\n", ename->len,
								ename->str);

		if (m->client->dotu)
259
			req->t_err = -ecode;
260

261 262
		if (!req->t_err) {
			req->t_err = p9_errstr2errno(ename->str, ename->len);
263 264

			/* string match failed */
265 266 267
			if (!req->t_err) {
				PRINT_FCALL_ERROR("unknown error", req->rc);
				req->t_err = -ESERVERFAULT;
268
			}
269
		}
270
	} else if (req->tc && req->rc->id != req->tc->id + 1) {
271 272
		P9_DPRINTK(P9_DEBUG_ERROR,
				"fcall mismatch: expected %d, got %d\n",
273 274 275
				req->tc->id + 1, req->rc->id);
		if (!req->t_err)
			req->t_err = -EIO;
276 277 278
	}
}

279 280
static unsigned int
p9_fd_poll(struct p9_client *client, struct poll_table_struct *pt)
281
{
282 283
	int ret, n;
	struct p9_trans_fd *ts = NULL;
284

285 286
	if (client && client->status == Connected)
		ts = client->trans;
287

288 289
	if (!ts)
		return -EREMOTEIO;
290

291 292
	if (!ts->rd->f_op || !ts->rd->f_op->poll)
		return -EIO;
293

294 295
	if (!ts->wr->f_op || !ts->wr->f_op->poll)
		return -EIO;
Tejun Heo's avatar
Tejun Heo committed
296

297 298 299
	ret = ts->rd->f_op->poll(ts->rd, pt);
	if (ret < 0)
		return ret;
Tejun Heo's avatar
Tejun Heo committed
300

301 302 303 304 305 306 307 308
	if (ts->rd != ts->wr) {
		n = ts->wr->f_op->poll(ts->wr, pt);
		if (n < 0)
			return n;
		ret = (ret & ~POLLOUT) | (n & ~POLLIN);
	}

	return ret;
Tejun Heo's avatar
Tejun Heo committed
309 310
}

311
/**
312 313 314 315
 * p9_fd_read- read from a fd
 * @client: client instance
 * @v: buffer to receive data into
 * @len: size of receive buffer
316
 *
317
 */
318

319
static int p9_fd_read(struct p9_client *client, void *v, int len)
320
{
321 322
	int ret;
	struct p9_trans_fd *ts = NULL;
323

324 325
	if (client && client->status != Disconnected)
		ts = client->trans;
326

327 328
	if (!ts)
		return -EREMOTEIO;
329

330 331
	if (!(ts->rd->f_flags & O_NONBLOCK))
		P9_DPRINTK(P9_DEBUG_ERROR, "blocking read ...\n");
332

333 334 335 336
	ret = kernel_read(ts->rd, ts->rd->f_pos, v, len);
	if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
		client->status = Disconnected;
	return ret;
337 338 339
}

/**
340 341
 * p9_read_work - called when there is some data to be read from a transport
 * @work: container of work to be done
342
 *
343
 */
344

345
static void p9_read_work(struct work_struct *work)
346
{
347 348
	int n, err;
	struct p9_conn *m;
349
	struct p9_req_t *req;
350 351 352 353
	struct p9_fcall *rcall;
	char *rbuf;

	m = container_of(work, struct p9_conn, rq);
354 355 356 357

	if (m->err < 0)
		return;

358 359
	rcall = NULL;
	P9_DPRINTK(P9_DEBUG_MUX, "start mux %p pos %d\n", m, m->rpos);
360

361 362 363 364 365 366 367
	if (!m->rcall) {
		m->rcall =
		    kmalloc(sizeof(struct p9_fcall) + m->client->msize,
								GFP_KERNEL);
		if (!m->rcall) {
			err = -ENOMEM;
			goto error;
368
		}
369 370 371

		m->rbuf = (char *)m->rcall + sizeof(struct p9_fcall);
		m->rpos = 0;
372 373
	}

374 375 376 377 378 379 380
	clear_bit(Rpending, &m->wsched);
	err = p9_fd_read(m->client, m->rbuf + m->rpos,
						m->client->msize - m->rpos);
	P9_DPRINTK(P9_DEBUG_MUX, "mux %p got %d bytes\n", m, err);
	if (err == -EAGAIN) {
		clear_bit(Rworksched, &m->wsched);
		return;
381 382
	}

383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435
	if (err <= 0)
		goto error;

	m->rpos += err;
	while (m->rpos > 4) {
		n = le32_to_cpu(*(__le32 *) m->rbuf);
		if (n >= m->client->msize) {
			P9_DPRINTK(P9_DEBUG_ERROR,
				"requested packet size too big: %d\n", n);
			err = -EIO;
			goto error;
		}

		if (m->rpos < n)
			break;

		err =
		    p9_deserialize_fcall(m->rbuf, n, m->rcall, m->client->dotu);
		if (err < 0)
			goto error;

#ifdef CONFIG_NET_9P_DEBUG
		if ((p9_debug_level&P9_DEBUG_FCALL) == P9_DEBUG_FCALL) {
			char buf[150];

			p9_printfcall(buf, sizeof(buf), m->rcall,
				m->client->dotu);
			printk(KERN_NOTICE ">>> %p %s\n", m, buf);
		}
#endif

		rcall = m->rcall;
		rbuf = m->rbuf;
		if (m->rpos > n) {
			m->rcall = kmalloc(sizeof(struct p9_fcall) +
						m->client->msize, GFP_KERNEL);
			if (!m->rcall) {
				err = -ENOMEM;
				goto error;
			}

			m->rbuf = (char *)m->rcall + sizeof(struct p9_fcall);
			memmove(m->rbuf, rbuf + n, m->rpos - n);
			m->rpos -= n;
		} else {
			m->rcall = NULL;
			m->rbuf = NULL;
			m->rpos = 0;
		}

		P9_DPRINTK(P9_DEBUG_MUX, "mux %p fcall id %d tag %d\n", m,
							rcall->id, rcall->tag);

436
		req = p9_tag_lookup(m->client, rcall->tag);
437 438

		if (req) {
439 440 441 442 443 444
			if (req->status != REQ_STATUS_FLSH) {
				list_del(&req->req_list);
				req->status = REQ_STATUS_RCVD;
			}

			req->rc = rcall;
445 446
			process_request(m, req);

447 448
			if (req->status != REQ_STATUS_FLSH)
				p9_conn_rpc_cb(m->client, req);
449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483
		} else {
			if (err >= 0 && rcall->id != P9_RFLUSH)
				P9_DPRINTK(P9_DEBUG_ERROR,
				  "unexpected response mux %p id %d tag %d\n",
				  m, rcall->id, rcall->tag);
			kfree(rcall);
		}
	}

	if (!list_empty(&m->req_list)) {
		if (test_and_clear_bit(Rpending, &m->wsched))
			n = POLLIN;
		else
			n = p9_fd_poll(m->client, NULL);

		if (n & POLLIN) {
			P9_DPRINTK(P9_DEBUG_MUX, "schedule read work %p\n", m);
			queue_work(p9_mux_wq, &m->rq);
		} else
			clear_bit(Rworksched, &m->wsched);
	} else
		clear_bit(Rworksched, &m->wsched);

	return;

error:
	p9_conn_cancel(m, err);
	clear_bit(Rworksched, &m->wsched);
}

/**
 * p9_fd_write - write to a socket
 * @client: client instance
 * @v: buffer to send data from
 * @len: size of send buffer
484
 *
485
 */
486

487
static int p9_fd_write(struct p9_client *client, void *v, int len)
488
{
489 490 491
	int ret;
	mm_segment_t oldfs;
	struct p9_trans_fd *ts = NULL;
492

493 494
	if (client && client->status != Disconnected)
		ts = client->trans;
495

496 497
	if (!ts)
		return -EREMOTEIO;
498

499 500
	if (!(ts->wr->f_flags & O_NONBLOCK))
		P9_DPRINTK(P9_DEBUG_ERROR, "blocking write ...\n");
Tejun Heo's avatar
Tejun Heo committed
501

502 503 504 505 506
	oldfs = get_fs();
	set_fs(get_ds());
	/* The cast to a user pointer is valid due to the set_fs() */
	ret = vfs_write(ts->wr, (void __user *)v, len, &ts->wr->f_pos);
	set_fs(oldfs);
Tejun Heo's avatar
Tejun Heo committed
507

508 509 510
	if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
		client->status = Disconnected;
	return ret;
511 512 513 514
}

/**
 * p9_write_work - called when a transport can send some data
515 516
 * @work: container for work to be done
 *
517
 */
518

519 520 521 522
static void p9_write_work(struct work_struct *work)
{
	int n, err;
	struct p9_conn *m;
523
	struct p9_req_t *req;
524 525 526 527 528 529 530 531 532 533 534 535 536 537

	m = container_of(work, struct p9_conn, wq);

	if (m->err < 0) {
		clear_bit(Wworksched, &m->wsched);
		return;
	}

	if (!m->wsize) {
		if (list_empty(&m->unsent_req_list)) {
			clear_bit(Wworksched, &m->wsched);
			return;
		}

538 539
		spin_lock(&m->client->lock);
		req = list_entry(m->unsent_req_list.next, struct p9_req_t,
540
			       req_list);
541
		req->status = REQ_STATUS_SENT;
542 543
		list_move_tail(&req->req_list, &m->req_list);

544 545
		m->wbuf = req->tc->sdata;
		m->wsize = req->tc->size;
546
		m->wpos = 0;
547
		spin_unlock(&m->client->lock);
548 549 550 551 552
	}

	P9_DPRINTK(P9_DEBUG_MUX, "mux %p pos %d size %d\n", m, m->wpos,
								m->wsize);
	clear_bit(Wpending, &m->wsched);
553
	err = p9_fd_write(m->client, m->wbuf + m->wpos, m->wsize - m->wpos);
554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574
	P9_DPRINTK(P9_DEBUG_MUX, "mux %p sent %d bytes\n", m, err);
	if (err == -EAGAIN) {
		clear_bit(Wworksched, &m->wsched);
		return;
	}

	if (err < 0)
		goto error;
	else if (err == 0) {
		err = -EREMOTEIO;
		goto error;
	}

	m->wpos += err;
	if (m->wpos == m->wsize)
		m->wpos = m->wsize = 0;

	if (m->wsize == 0 && !list_empty(&m->unsent_req_list)) {
		if (test_and_clear_bit(Wpending, &m->wsched))
			n = POLLOUT;
		else
575
			n = p9_fd_poll(m->client, NULL);
576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591

		if (n & POLLOUT) {
			P9_DPRINTK(P9_DEBUG_MUX, "schedule write work %p\n", m);
			queue_work(p9_mux_wq, &m->wq);
		} else
			clear_bit(Wworksched, &m->wsched);
	} else
		clear_bit(Wworksched, &m->wsched);

	return;

error:
	p9_conn_cancel(m, err);
	clear_bit(Wworksched, &m->wsched);
}

592
static int p9_pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
593
{
594 595 596 597 598
	struct p9_poll_wait *pwait =
		container_of(wait, struct p9_poll_wait, wait);
	struct p9_conn *m = pwait->conn;
	unsigned long flags;
	DECLARE_WAITQUEUE(dummy_wait, p9_poll_task);
599

600 601 602 603
	spin_lock_irqsave(&p9_poll_lock, flags);
	if (list_empty(&m->poll_pending_link))
		list_add_tail(&m->poll_pending_link, &p9_poll_pending_list);
	spin_unlock_irqrestore(&p9_poll_lock, flags);
604

605 606
	/* perform the default wake up operation */
	return default_wake_function(&dummy_wait, mode, sync, key);
607 608 609
}

/**
610 611 612 613
 * p9_pollwait - add poll task to the wait queue
 * @filp: file pointer being polled
 * @wait_address: wait_q to block on
 * @p: poll state
614
 *
615
 * called by files poll operation to add v9fs-poll task to files wait queue
616
 */
617

618 619
static void
p9_pollwait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p)
620
{
621 622 623
	struct p9_conn *m = container_of(p, struct p9_conn, pt);
	struct p9_poll_wait *pwait = NULL;
	int i;
624

625 626 627 628
	for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) {
		if (m->poll_wait[i].wait_addr == NULL) {
			pwait = &m->poll_wait[i];
			break;
629 630 631
		}
	}

632 633
	if (!pwait) {
		P9_DPRINTK(P9_DEBUG_ERROR, "not enough wait_address slots\n");
634 635 636
		return;
	}

637 638 639 640 641
	if (!wait_address) {
		P9_DPRINTK(P9_DEBUG_ERROR, "no wait_address\n");
		pwait->wait_addr = ERR_PTR(-EIO);
		return;
	}
642

643 644 645 646 647
	pwait->conn = m;
	pwait->wait_addr = wait_address;
	init_waitqueue_func_entry(&pwait->wait, p9_pollwake);
	add_wait_queue(wait_address, &pwait->wait);
}
648

649 650 651 652 653 654
/**
 * p9_conn_create - allocate and initialize the per-session mux data
 * @client: client instance
 *
 * Note: Creates the polling task if this is the first session.
 */
655

656 657 658 659
static struct p9_conn *p9_conn_create(struct p9_client *client)
{
	int i, n;
	struct p9_conn *m;
660

661 662 663 664
	P9_DPRINTK(P9_DEBUG_MUX, "client %p msize %d\n", client, client->msize);
	m = kzalloc(sizeof(struct p9_conn), GFP_KERNEL);
	if (!m)
		return ERR_PTR(-ENOMEM);
665

666 667
	INIT_LIST_HEAD(&m->mux_list);
	m->client = client;
668

669 670 671 672 673 674
	INIT_LIST_HEAD(&m->req_list);
	INIT_LIST_HEAD(&m->unsent_req_list);
	INIT_WORK(&m->rq, p9_read_work);
	INIT_WORK(&m->wq, p9_write_work);
	INIT_LIST_HEAD(&m->poll_pending_link);
	init_poll_funcptr(&m->pt, p9_pollwait);
675

676 677 678 679 680
	n = p9_fd_poll(client, &m->pt);
	if (n & POLLIN) {
		P9_DPRINTK(P9_DEBUG_MUX, "mux %p can read\n", m);
		set_bit(Rpending, &m->wsched);
	}
681

682 683 684 685 686 687 688 689 690 691 692
	if (n & POLLOUT) {
		P9_DPRINTK(P9_DEBUG_MUX, "mux %p can write\n", m);
		set_bit(Wpending, &m->wsched);
	}

	for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) {
		if (IS_ERR(m->poll_wait[i].wait_addr)) {
			p9_mux_poll_stop(m);
			kfree(m);
			/* return the error code */
			return (void *)m->poll_wait[i].wait_addr;
693 694 695
		}
	}

696 697
	return m;
}
698

699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723
/**
 * p9_poll_mux - polls a mux and schedules read or write works if necessary
 * @m: connection to poll
 *
 */

static void p9_poll_mux(struct p9_conn *m)
{
	int n;

	if (m->err < 0)
		return;

	n = p9_fd_poll(m->client, NULL);
	if (n < 0 || n & (POLLERR | POLLHUP | POLLNVAL)) {
		P9_DPRINTK(P9_DEBUG_MUX, "error mux %p err %d\n", m, n);
		if (n >= 0)
			n = -ECONNRESET;
		p9_conn_cancel(m, n);
	}

	if (n & POLLIN) {
		set_bit(Rpending, &m->wsched);
		P9_DPRINTK(P9_DEBUG_MUX, "mux %p can read\n", m);
		if (!test_and_set_bit(Rworksched, &m->wsched)) {
724 725
			P9_DPRINTK(P9_DEBUG_MUX, "schedule read work %p\n", m);
			queue_work(p9_mux_wq, &m->rq);
726 727
		}
	}
728

729 730 731 732 733 734 735 736 737
	if (n & POLLOUT) {
		set_bit(Wpending, &m->wsched);
		P9_DPRINTK(P9_DEBUG_MUX, "mux %p can write\n", m);
		if ((m->wsize || !list_empty(&m->unsent_req_list))
		    && !test_and_set_bit(Wworksched, &m->wsched)) {
			P9_DPRINTK(P9_DEBUG_MUX, "schedule write work %p\n", m);
			queue_work(p9_mux_wq, &m->wq);
		}
	}
738 739 740 741 742 743 744 745 746 747 748
}

/**
 * p9_send_request - send 9P request
 * The function can sleep until the request is scheduled for sending.
 * The function can be interrupted. Return from the function is not
 * a guarantee that the request is sent successfully. Can return errors
 * that can be retrieved by PTR_ERR macros.
 *
 * @m: mux data
 * @tc: request to be sent
749
 *
750
 */
751

752
static struct p9_req_t *p9_send_request(struct p9_conn *m, struct p9_fcall *tc)
753
{
754
	int tag;
755
	int n;
756
	struct p9_req_t *req;
757 758 759 760 761 762

	P9_DPRINTK(P9_DEBUG_MUX, "mux %p task %p tcall %p id %d\n", m, current,
		tc, tc->id);
	if (m->err < 0)
		return ERR_PTR(m->err);

763
	tag = P9_NOTAG;
764
	if (tc->id != P9_TVERSION) {
765 766
		tag = p9_idpool_get(m->client->tagpool);
		if (tag < 0)
767
			return ERR_PTR(-ENOMEM);
Julia Lawall's avatar
Julia Lawall committed
768
	}
769

770 771 772
	p9_set_tag(tc, tag);

	req = p9_tag_alloc(m->client, tag);
773 774 775 776 777

#ifdef CONFIG_NET_9P_DEBUG
	if ((p9_debug_level&P9_DEBUG_FCALL) == P9_DEBUG_FCALL) {
		char buf[150];

778
		p9_printfcall(buf, sizeof(buf), tc, m->client->dotu);
779 780 781 782
		printk(KERN_NOTICE "<<< %p %s\n", m, buf);
	}
#endif

783 784 785 786 787
	req->tag = tag;
	req->tc = tc;
	req->rc = NULL;
	req->t_err = 0;
	req->status = REQ_STATUS_UNSENT;
788

789
	spin_lock(&m->client->lock);
790
	list_add_tail(&req->req_list, &m->unsent_req_list);
791
	spin_unlock(&m->client->lock);
792 793 794 795

	if (test_and_clear_bit(Wpending, &m->wsched))
		n = POLLOUT;
	else
796
		n = p9_fd_poll(m->client, NULL);
797 798 799 800 801 802 803 804

	if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched))
		queue_work(p9_mux_wq, &m->wq);

	return req;
}

static int
805
p9_mux_flush_request(struct p9_conn *m, struct p9_req_t *req)
806 807
{
	struct p9_fcall *fc;
808
	struct p9_req_t *rreq, *rptr;
809 810 811 812

	P9_DPRINTK(P9_DEBUG_MUX, "mux %p req %p tag %d\n", m, req, req->tag);

	/* if a response was received for a request, do nothing */
813
	if (req->rc || req->t_err) {
814 815 816 817 818
		P9_DPRINTK(P9_DEBUG_MUX,
			"mux %p req %p response already received\n", m, req);
		return 0;
	}

819
	req->status = REQ_STATUS_FLSH;
820

821
	spin_lock(&m->client->lock);
822 823 824 825 826 827
	/* if the request is not sent yet, just remove it from the list */
	list_for_each_entry_safe(rreq, rptr, &m->unsent_req_list, req_list) {
		if (rreq->tag == req->tag) {
			P9_DPRINTK(P9_DEBUG_MUX,
			   "mux %p req %p request is not sent yet\n", m, req);
			list_del(&rreq->req_list);
828 829 830
			req->status = REQ_STATUS_FLSHD;
			spin_unlock(&m->client->lock);
			p9_conn_rpc_cb(m->client, req);
831 832 833
			return 0;
		}
	}
834
	spin_unlock(&m->client->lock);
835 836 837

	clear_thread_flag(TIF_SIGPENDING);
	fc = p9_create_tflush(req->tag);
838
	p9_send_request(m, fc);
839 840 841 842 843 844
	return 1;
}

/**
 * p9_fd_rpc- sends 9P request and waits until a response is available.
 *	The function can be interrupted.
845
 * @client: client instance
846 847
 * @tc: request to be sent
 * @rc: pointer where a pointer to the response is stored
848
 *
849
 */
850

851
int
852
p9_fd_rpc(struct p9_client *client, struct p9_fcall *tc, struct p9_fcall **rc)
853
{
854
	struct p9_trans_fd *p = client->trans;
855 856 857
	struct p9_conn *m = p->conn;
	int err, sigpending;
	unsigned long flags;
858
	struct p9_req_t *req;
859 860 861 862 863 864 865 866 867 868

	if (rc)
		*rc = NULL;

	sigpending = 0;
	if (signal_pending(current)) {
		sigpending = 1;
		clear_thread_flag(TIF_SIGPENDING);
	}

869
	req = p9_send_request(m, tc);
870 871 872 873 874 875
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		P9_DPRINTK(P9_DEBUG_MUX, "error %d\n", err);
		return err;
	}

876 877 878 879
	err = wait_event_interruptible(*req->wq, req->rc != NULL ||
								req->t_err < 0);
	if (req->t_err < 0)
		err = req->t_err;
880

881
	if (err == -ERESTARTSYS && client->status == Connected
882 883 884 885 886
							&& m->err == 0) {
		if (p9_mux_flush_request(m, req)) {
			/* wait until we get response of the flush message */
			do {
				clear_thread_flag(TIF_SIGPENDING);
887 888 889
				err = wait_event_interruptible(*req->wq,
					req->rc || req->t_err);
			} while (!req->rc && !req->t_err &&
890 891
					err == -ERESTARTSYS &&
					client->status == Connected && !m->err);
892 893 894 895 896 897 898 899 900 901 902 903 904

			err = -ERESTARTSYS;
		}
		sigpending = 1;
	}

	if (sigpending) {
		spin_lock_irqsave(&current->sighand->siglock, flags);
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);
	}

	if (rc)
905
		*rc = req->rc;
906
	else
907
		kfree(req->rc);
908

909
	p9_free_req(client, req);
910 911 912 913 914 915
	if (err > 0)
		err = -EIO;

	return err;
}

916
/**
917
 * parse_options - parse mount options into session structure
918
 * @options: options string passed from mount
919
 * @opts: transport-specific structure to parse options into
920
 *
921
 * Returns 0 upon success, -ERRNO upon failure
922
 */
923

924
static int parse_opts(char *params, struct p9_fd_opts *opts)
925
{
926 927 928
	char *p;
	substring_t args[MAX_OPT_ARGS];
	int option;
929
	char *options;
930
	int ret;
931

932 933 934
	opts->port = P9_PORT;
	opts->rfd = ~0;
	opts->wfd = ~0;
935

936 937 938 939 940 941 942 943 944
	if (!params)
		return 0;

	options = kstrdup(params, GFP_KERNEL);
	if (!options) {
		P9_DPRINTK(P9_DEBUG_ERROR,
				"failed to allocate copy of option string\n");
		return -ENOMEM;
	}
945

946 947
	while ((p = strsep(&options, ",")) != NULL) {
		int token;
948
		int r;
949 950 951
		if (!*p)
			continue;
		token = match_token(p, tokens, args);
952 953
		r = match_int(&args[0], &option);
		if (r < 0) {
954 955
			P9_DPRINTK(P9_DEBUG_ERROR,
			 "integer field, but no integer?\n");
956
			ret = r;
957 958 959 960 961 962 963 964 965 966 967 968 969 970 971
			continue;
		}
		switch (token) {
		case Opt_port:
			opts->port = option;
			break;
		case Opt_rfdno:
			opts->rfd = option;
			break;
		case Opt_wfdno:
			opts->wfd = option;
			break;
		default:
			continue;
		}
972
	}