author     Jiri Kosina <jkosina@suse.cz>    2011-04-26 10:22:15 +0200
committer  Jiri Kosina <jkosina@suse.cz>    2011-04-26 10:22:59 +0200
commit     07f9479a40cc778bc1462ada11f95b01360ae4ff (patch)
tree       0676cf38df3844004bb3ebfd99dfa67a4a8998f5 /net/9p
parent     9d5e6bdb3013acfb311ab407eeca0b6a6a3dedbf (diff)
parent     cd2e49e90f1cae7726c9a2c54488d881d7f1cd1c (diff)
Merge branch 'master' into for-next
Fast-forwarded to current state of Linus' tree as there are patches to be applied for files that didn't exist on the old branch.
Diffstat (limited to 'net/9p')
-rw-r--r--  net/9p/client.c        41
-rw-r--r--  net/9p/protocol.c      13
-rw-r--r--  net/9p/trans_common.c  16
-rw-r--r--  net/9p/trans_fd.c       2
-rw-r--r--  net/9p/trans_rdma.c     6
-rw-r--r--  net/9p/trans_virtio.c  87
-rw-r--r--  net/9p/util.c           4
7 files changed, 88 insertions(+), 81 deletions(-)
diff --git a/net/9p/client.c b/net/9p/client.c
index 347ec0cd271..77367745be9 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -178,7 +178,7 @@ free_and_return:
* @tag: numeric id for transaction
*
* this is a simple array lookup, but will grow the
- * request_slots as necessary to accomodate transaction
+ * request_slots as necessary to accommodate transaction
* ids which did not previously have a slot.
*
* this code relies on the client spinlock to manage locks, its
@@ -223,7 +223,7 @@ static struct p9_req_t *p9_tag_alloc(struct p9_client *c, u16 tag)
req = &c->reqs[row][col];
if (!req->tc) {
- req->wq = kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL);
+ req->wq = kmalloc(sizeof(wait_queue_head_t), GFP_NOFS);
if (!req->wq) {
printk(KERN_ERR "Couldn't grow tag array\n");
return ERR_PTR(-ENOMEM);
@@ -233,17 +233,17 @@ static struct p9_req_t *p9_tag_alloc(struct p9_client *c, u16 tag)
P9_TRANS_PREF_PAYLOAD_SEP) {
int alloc_msize = min(c->msize, 4096);
req->tc = kmalloc(sizeof(struct p9_fcall)+alloc_msize,
- GFP_KERNEL);
+ GFP_NOFS);
req->tc->capacity = alloc_msize;
req->rc = kmalloc(sizeof(struct p9_fcall)+alloc_msize,
- GFP_KERNEL);
+ GFP_NOFS);
req->rc->capacity = alloc_msize;
} else {
req->tc = kmalloc(sizeof(struct p9_fcall)+c->msize,
- GFP_KERNEL);
+ GFP_NOFS);
req->tc->capacity = c->msize;
req->rc = kmalloc(sizeof(struct p9_fcall)+c->msize,
- GFP_KERNEL);
+ GFP_NOFS);
req->rc->capacity = c->msize;
}
if ((!req->tc) || (!req->rc)) {
@@ -929,15 +929,15 @@ error:
}
EXPORT_SYMBOL(p9_client_attach);
-struct p9_fid *p9_client_walk(struct p9_fid *oldfid, int nwname, char **wnames,
- int clone)
+struct p9_fid *p9_client_walk(struct p9_fid *oldfid, uint16_t nwname,
+ char **wnames, int clone)
{
int err;
struct p9_client *clnt;
struct p9_fid *fid;
struct p9_qid *wqids;
struct p9_req_t *req;
- int16_t nwqids, count;
+ uint16_t nwqids, count;
err = 0;
wqids = NULL;
@@ -955,7 +955,7 @@ struct p9_fid *p9_client_walk(struct p9_fid *oldfid, int nwname, char **wnames,
fid = oldfid;
- P9_DPRINTK(P9_DEBUG_9P, ">>> TWALK fids %d,%d nwname %d wname[0] %s\n",
+ P9_DPRINTK(P9_DEBUG_9P, ">>> TWALK fids %d,%d nwname %ud wname[0] %s\n",
oldfid->fid, fid->fid, nwname, wnames ? wnames[0] : NULL);
req = p9_client_rpc(clnt, P9_TWALK, "ddT", oldfid->fid, fid->fid,
@@ -1220,27 +1220,6 @@ error:
}
EXPORT_SYMBOL(p9_client_fsync);
-int p9_client_sync_fs(struct p9_fid *fid)
-{
- int err = 0;
- struct p9_req_t *req;
- struct p9_client *clnt;
-
- P9_DPRINTK(P9_DEBUG_9P, ">>> TSYNC_FS fid %d\n", fid->fid);
-
- clnt = fid->clnt;
- req = p9_client_rpc(clnt, P9_TSYNCFS, "d", fid->fid);
- if (IS_ERR(req)) {
- err = PTR_ERR(req);
- goto error;
- }
- P9_DPRINTK(P9_DEBUG_9P, "<<< RSYNCFS fid %d\n", fid->fid);
- p9_free_req(clnt, req);
-error:
- return err;
-}
-EXPORT_SYMBOL(p9_client_sync_fs);
-
int p9_client_clunk(struct p9_fid *fid)
{
int err;
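
The client.c hunks above convert the tag and fcall allocations from GFP_KERNEL to GFP_NOFS: they run on the 9p I/O path, where a GFP_KERNEL allocation could trigger direct reclaim that re-enters the filesystem and deadlocks on the very request being built. A minimal sketch of the allocation pattern, assuming a hypothetical alloc_fcall() helper (not part of the patch):

#include <linux/slab.h>
#include <net/9p/9p.h>

/* Sketch only: allocate a p9_fcall with msize bytes of payload space.
 * GFP_NOFS keeps reclaim from recursing back into the filesystem.
 */
static struct p9_fcall *alloc_fcall(size_t msize)
{
	struct p9_fcall *fc;

	fc = kmalloc(sizeof(*fc) + msize, GFP_NOFS);
	if (!fc)
		return NULL;
	fc->capacity = msize;
	fc->sdata = (char *)fc + sizeof(struct p9_fcall);	/* payload follows the header */
	return fc;
}
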
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index 2ce515b859b..b58a501cf3d 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -205,7 +205,7 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
if (errcode)
break;
- *sptr = kmalloc(len + 1, GFP_KERNEL);
+ *sptr = kmalloc(len + 1, GFP_NOFS);
if (*sptr == NULL) {
errcode = -EFAULT;
break;
@@ -265,7 +265,7 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
}
break;
case 'T':{
- int16_t *nwname = va_arg(ap, int16_t *);
+ uint16_t *nwname = va_arg(ap, uint16_t *);
char ***wnames = va_arg(ap, char ***);
errcode = p9pdu_readf(pdu, proto_version,
@@ -273,7 +273,7 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
if (!errcode) {
*wnames =
kmalloc(sizeof(char *) * *nwname,
- GFP_KERNEL);
+ GFP_NOFS);
if (!*wnames)
errcode = -ENOMEM;
}
@@ -317,7 +317,7 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
*wqids =
kmalloc(*nwqid *
sizeof(struct p9_qid),
- GFP_KERNEL);
+ GFP_NOFS);
if (*wqids == NULL)
errcode = -ENOMEM;
}
@@ -468,7 +468,8 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
case 'E':{
int32_t cnt = va_arg(ap, int32_t);
const char *k = va_arg(ap, const void *);
- const char *u = va_arg(ap, const void *);
+ const char __user *u = va_arg(ap,
+ const void __user *);
errcode = p9pdu_writef(pdu, proto_version, "d",
cnt);
if (!errcode && pdu_write_urw(pdu, k, u, cnt))
@@ -495,7 +496,7 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
}
break;
case 'T':{
- int16_t nwname = va_arg(ap, int);
+ uint16_t nwname = va_arg(ap, int);
const char **wnames = va_arg(ap, const char **);
errcode = p9pdu_writef(pdu, proto_version, "w",
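
Note the asymmetry the protocol.c hunks preserve: the reader takes a uint16_t *, while the writer still fetches va_arg(ap, int), because any variadic argument narrower than int is promoted to int by the caller. A standalone illustration of that promotion rule (not from the patch):

#include <stdarg.h>
#include <stdint.h>

/* Sketch only: a uint16_t passed through "..." arrives as an int, so it
 * must be fetched as int and narrowed back; va_arg(ap, uint16_t) would be
 * undefined behaviour on most ABIs.
 */
static uint16_t first_u16(int count, ...)
{
	va_list ap;
	uint16_t v;

	va_start(ap, count);
	v = (uint16_t)va_arg(ap, int);	/* default argument promotion */
	va_end(ap);
	return v;
}
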
diff --git a/net/9p/trans_common.c b/net/9p/trans_common.c
index d62b9aa58df..e883172f9aa 100644
--- a/net/9p/trans_common.c
+++ b/net/9p/trans_common.c
@@ -36,14 +36,14 @@ p9_release_req_pages(struct trans_rpage_info *rpinfo)
EXPORT_SYMBOL(p9_release_req_pages);
/**
- * p9_nr_pages - Return number of pages needed to accomodate the payload.
+ * p9_nr_pages - Return number of pages needed to accommodate the payload.
*/
int
p9_nr_pages(struct p9_req_t *req)
{
- int start_page, end_page;
- start_page = (unsigned long long)req->tc->pubuf >> PAGE_SHIFT;
- end_page = ((unsigned long long)req->tc->pubuf + req->tc->pbuf_size +
+ unsigned long start_page, end_page;
+ start_page = (unsigned long)req->tc->pubuf >> PAGE_SHIFT;
+ end_page = ((unsigned long)req->tc->pubuf + req->tc->pbuf_size +
PAGE_SIZE - 1) >> PAGE_SHIFT;
return end_page - start_page;
}
@@ -55,7 +55,7 @@ EXPORT_SYMBOL(p9_nr_pages);
* @req: Request to be sent to server.
* @pdata_off: data offset into the first page after translation (gup).
* @pdata_len: Total length of the IO. gup may not return requested # of pages.
- * @nr_pages: number of pages to accomodate the payload
+ * @nr_pages: number of pages to accommodate the payload
* @rw: Indicates if the pages are for read or write.
*/
int
@@ -66,11 +66,11 @@ p9_payload_gup(struct p9_req_t *req, size_t *pdata_off, int *pdata_len,
uint32_t pdata_mapped_pages;
struct trans_rpage_info *rpinfo;
- *pdata_off = (size_t)req->tc->pubuf & (PAGE_SIZE-1);
+ *pdata_off = (__force size_t)req->tc->pubuf & (PAGE_SIZE-1);
if (*pdata_off)
- first_page_bytes = min((PAGE_SIZE - *pdata_off),
- req->tc->pbuf_size);
+ first_page_bytes = min(((size_t)PAGE_SIZE - *pdata_off),
+ req->tc->pbuf_size);
rpinfo = req->tc->private;
pdata_mapped_pages = get_user_pages_fast((unsigned long)req->tc->pubuf,
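
The trans_common.c hunks widen the page-index arithmetic from int to unsigned long so a 64-bit user address is not truncated before the shift; the count itself is the usual round-up across page boundaries. A standalone sketch of that calculation (hypothetical helper, userspace constants assumed):

#include <stddef.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Sketch only: number of pages spanned by len bytes starting at addr. */
static unsigned long buf_nr_pages(unsigned long addr, size_t len)
{
	unsigned long start = addr >> PAGE_SHIFT;
	unsigned long end = (addr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;

	return end - start;	/* e.g. addr=0x1ff0, len=32 spans 2 pages */
}
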
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
index a30471e5174..aa5672b15ea 100644
--- a/net/9p/trans_fd.c
+++ b/net/9p/trans_fd.c
@@ -350,7 +350,7 @@ static void p9_read_work(struct work_struct *work)
if (m->req->rc == NULL) {
m->req->rc = kmalloc(sizeof(struct p9_fcall) +
- m->client->msize, GFP_KERNEL);
+ m->client->msize, GFP_NOFS);
if (!m->req->rc) {
m->req = NULL;
err = -ENOMEM;
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 29a54ccd213..150e0c4bbf4 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -424,7 +424,7 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
struct p9_rdma_context *rpl_context = NULL;
/* Allocate an fcall for the reply */
- rpl_context = kmalloc(sizeof *rpl_context, GFP_KERNEL);
+ rpl_context = kmalloc(sizeof *rpl_context, GFP_NOFS);
if (!rpl_context) {
err = -ENOMEM;
goto err_close;
@@ -437,7 +437,7 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
*/
if (!req->rc) {
req->rc = kmalloc(sizeof(struct p9_fcall)+client->msize,
- GFP_KERNEL);
+ GFP_NOFS);
if (req->rc) {
req->rc->sdata = (char *) req->rc +
sizeof(struct p9_fcall);
@@ -468,7 +468,7 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
req->rc = NULL;
/* Post the request */
- c = kmalloc(sizeof *c, GFP_KERNEL);
+ c = kmalloc(sizeof *c, GFP_NOFS);
if (!c) {
err = -ENOMEM;
goto err_free1;
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 9b550ed9c71..244e7074218 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -43,6 +43,7 @@
#include <net/9p/client.h>
#include <net/9p/transport.h>
#include <linux/scatterlist.h>
+#include <linux/swap.h>
#include <linux/virtio.h>
#include <linux/virtio_9p.h>
#include "trans_common.h"
@@ -51,6 +52,8 @@
/* a single mutex to manage channel initialization and attachment */
static DEFINE_MUTEX(virtio_9p_lock);
+static DECLARE_WAIT_QUEUE_HEAD(vp_wq);
+static atomic_t vp_pinned = ATOMIC_INIT(0);
/**
* struct virtio_chan - per-instance transport information
@@ -78,7 +81,10 @@ struct virtio_chan {
struct virtqueue *vq;
int ring_bufs_avail;
wait_queue_head_t *vc_wq;
-
+ /* This is global limit. Since we don't have a global structure,
+ * will be placing it in each channel.
+ */
+ int p9_max_pages;
/* Scatterlist: can be too big for stack. */
struct scatterlist sg[VIRTQUEUE_NUM];
@@ -141,34 +147,36 @@ static void req_done(struct virtqueue *vq)
P9_DPRINTK(P9_DEBUG_TRANS, ": request done\n");
- do {
+ while (1) {
spin_lock_irqsave(&chan->lock, flags);
rc = virtqueue_get_buf(chan->vq, &len);
- if (rc != NULL) {
- if (!chan->ring_bufs_avail) {
- chan->ring_bufs_avail = 1;
- wake_up(chan->vc_wq);
- }
- spin_unlock_irqrestore(&chan->lock, flags);
- P9_DPRINTK(P9_DEBUG_TRANS, ": rc %p\n", rc);
- P9_DPRINTK(P9_DEBUG_TRANS, ": lookup tag %d\n",
- rc->tag);
- req = p9_tag_lookup(chan->client, rc->tag);
- req->status = REQ_STATUS_RCVD;
- if (req->tc->private) {
- struct trans_rpage_info *rp = req->tc->private;
- /*Release pages */
- p9_release_req_pages(rp);
- if (rp->rp_alloc)
- kfree(rp);
- req->tc->private = NULL;
- }
- p9_client_cb(chan->client, req);
- } else {
+ if (rc == NULL) {
spin_unlock_irqrestore(&chan->lock, flags);
+ break;
+ }
+
+ chan->ring_bufs_avail = 1;
+ spin_unlock_irqrestore(&chan->lock, flags);
+ /* Wakeup if anyone waiting for VirtIO ring space. */
+ wake_up(chan->vc_wq);
+ P9_DPRINTK(P9_DEBUG_TRANS, ": rc %p\n", rc);
+ P9_DPRINTK(P9_DEBUG_TRANS, ": lookup tag %d\n", rc->tag);
+ req = p9_tag_lookup(chan->client, rc->tag);
+ if (req->tc->private) {
+ struct trans_rpage_info *rp = req->tc->private;
+ int p = rp->rp_nr_pages;
+ /*Release pages */
+ p9_release_req_pages(rp);
+ atomic_sub(p, &vp_pinned);
+ wake_up(&vp_wq);
+ if (rp->rp_alloc)
+ kfree(rp);
+ req->tc->private = NULL;
}
- } while (rc != NULL);
+ req->status = REQ_STATUS_RCVD;
+ p9_client_cb(chan->client, req);
+ }
}
/**
@@ -263,7 +271,6 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
P9_DPRINTK(P9_DEBUG_TRANS, "9p debug: virtio request\n");
-req_retry:
req->status = REQ_STATUS_SENT;
if (req->tc->pbuf_size && (req->tc->pubuf && P9_IS_USER_CONTEXT)) {
@@ -271,6 +278,14 @@ req_retry:
int rpinfo_size = sizeof(struct trans_rpage_info) +
sizeof(struct page *) * nr_pages;
+ if (atomic_read(&vp_pinned) >= chan->p9_max_pages) {
+ err = wait_event_interruptible(vp_wq,
+ atomic_read(&vp_pinned) < chan->p9_max_pages);
+ if (err == -ERESTARTSYS)
+ return err;
+ P9_DPRINTK(P9_DEBUG_TRANS, "9p: May gup pages now.\n");
+ }
+
if (rpinfo_size <= (req->tc->capacity - req->tc->size)) {
/* We can use sdata */
req->tc->private = req->tc->sdata + req->tc->size;
@@ -293,9 +308,12 @@ req_retry:
if (rpinfo->rp_alloc)
kfree(rpinfo);
return err;
+ } else {
+ atomic_add(rpinfo->rp_nr_pages, &vp_pinned);
}
}
+req_retry_pinned:
spin_lock_irqsave(&chan->lock, flags);
/* Handle out VirtIO ring buffers */
@@ -308,8 +326,11 @@ req_retry:
outp = pack_sg_list_p(chan->sg, out, VIRTQUEUE_NUM,
pdata_off, rpinfo->rp_data, pdata_len);
} else {
- char *pbuf = req->tc->pubuf ? req->tc->pubuf :
- req->tc->pkbuf;
+ char *pbuf;
+ if (req->tc->pubuf)
+ pbuf = (__force char *) req->tc->pubuf;
+ else
+ pbuf = req->tc->pkbuf;
outp = pack_sg_list(chan->sg, out, VIRTQUEUE_NUM, pbuf,
req->tc->pbuf_size);
}
@@ -334,8 +355,12 @@ req_retry:
in = pack_sg_list_p(chan->sg, out+inp, VIRTQUEUE_NUM,
pdata_off, rpinfo->rp_data, pdata_len);
} else {
- char *pbuf = req->tc->pubuf ? req->tc->pubuf :
- req->tc->pkbuf;
+ char *pbuf;
+ if (req->tc->pubuf)
+ pbuf = (__force char *) req->tc->pubuf;
+ else
+ pbuf = req->tc->pkbuf;
+
in = pack_sg_list(chan->sg, out+inp, VIRTQUEUE_NUM,
pbuf, req->tc->pbuf_size);
}
@@ -356,7 +381,7 @@ req_retry:
return err;
P9_DPRINTK(P9_DEBUG_TRANS, "9p:Retry virtio request\n");
- goto req_retry;
+ goto req_retry_pinned;
} else {
spin_unlock_irqrestore(&chan->lock, flags);
P9_DPRINTK(P9_DEBUG_TRANS,
@@ -453,6 +478,8 @@ static int p9_virtio_probe(struct virtio_device *vdev)
}
init_waitqueue_head(chan->vc_wq);
chan->ring_bufs_avail = 1;
+ /* Ceiling limit to avoid denial of service attacks */
+ chan->p9_max_pages = nr_free_buffer_pages()/4;
mutex_lock(&virtio_9p_lock);
list_add_tail(&chan->chan_list, &virtio_chan_list);
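
The trans_virtio.c hunks above add zero-copy back-pressure: a request that would pin user pages waits on vp_wq while the global vp_pinned count is at or above chan->p9_max_pages (a quarter of nr_free_buffer_pages()), and req_done() subtracts the released pages and wakes waiters. A condensed sketch of that pattern, with assumed throttle_pin()/throttle_unpin() names (not the actual functions):

#include <linux/atomic.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(pin_wq);
static atomic_t pinned_pages = ATOMIC_INIT(0);

/* Sketch only: block (interruptibly) until there is room under the cap,
 * then account the pages we are about to pin.
 */
static int throttle_pin(int nr_pages, int max_pages)
{
	if (atomic_read(&pinned_pages) >= max_pages) {
		int err = wait_event_interruptible(pin_wq,
				atomic_read(&pinned_pages) < max_pages);
		if (err == -ERESTARTSYS)
			return err;
	}
	atomic_add(nr_pages, &pinned_pages);
	return 0;
}

/* Sketch only: called after the pages are released; wake up waiters. */
static void throttle_unpin(int nr_pages)
{
	atomic_sub(nr_pages, &pinned_pages);
	wake_up(&pin_wq);
}
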
diff --git a/net/9p/util.c b/net/9p/util.c
index e048701a72d..da6af81e59d 100644
--- a/net/9p/util.c
+++ b/net/9p/util.c
@@ -67,7 +67,7 @@ EXPORT_SYMBOL(p9_idpool_create);
/**
* p9_idpool_destroy - create a new per-connection id pool
- * @p: idpool to destory
+ * @p: idpool to destroy
*/
void p9_idpool_destroy(struct p9_idpool *p)
@@ -92,7 +92,7 @@ int p9_idpool_get(struct p9_idpool *p)
unsigned long flags;
retry:
- if (idr_pre_get(&p->pool, GFP_KERNEL) == 0)
+ if (idr_pre_get(&p->pool, GFP_NOFS) == 0)
return 0;
spin_lock_irqsave(&p->lock, flags);
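
The util.c hunk keeps the old idr pre_get/retry idiom while switching the preallocation to GFP_NOFS. A sketch of that idiom with an assumed standalone pool (not the actual p9_idpool_get body):

#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(pool_lock);
static DEFINE_IDR(pool);

/* Sketch only: preallocate outside the lock, retry if the preallocation
 * was consumed by a concurrent caller.
 */
static int get_id(void *ptr)
{
	unsigned long flags;
	int id, err;

retry:
	if (idr_pre_get(&pool, GFP_NOFS) == 0)
		return -ENOMEM;
	spin_lock_irqsave(&pool_lock, flags);
	err = idr_get_new(&pool, ptr, &id);
	spin_unlock_irqrestore(&pool_lock, flags);
	if (err == -EAGAIN)
		goto retry;
	return err ? err : id;
}
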