| author | Christof Schmitt <christof.schmitt@de.ibm.com> | 2011-02-22 19:54:40 +0100 |
|---|---|---|
| committer | James Bottomley <James.Bottomley@suse.de> | 2011-02-25 12:01:59 -0500 |
| commit | c7b279ae51942c14529bf2806685e9c658f28611 (patch) | |
| tree | 76d48640ccd62f7f375180cb9179f03079ac8c0c /drivers/s390/scsi/zfcp_fsf.c | |
| parent | 7c35e77b96b2f0af8c278c13d484d42dad3c7422 (diff) | |
[SCSI] zfcp: Replace kmem_cache for "status read" data
zfcp requires a mempool for the status read data blocks to resubmit
the "status read" requests at any time. Each status read data block
has the size of a page (4096 bytes) and needs to be placed in one
page.
Instead of having a kmem_cache for allocating page-sized chunks, use
mempool_create_page_pool to create a mempool that returns whole pages,
and remove the zfcp kmem_cache (a sketch of the pool-creation side
follows the sign-offs below).
Signed-off-by: Christof Schmitt <christof.schmitt@de.ibm.com>
Signed-off-by: Steffen Maier <maier@linux.vnet.ibm.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
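The creation side of the pool is not part of this diffstat (it lives in the adapter setup code, outside zfcp_fsf.c). The following is a minimal sketch of the approach described in the commit message, not the actual zfcp change: the field adapter->pool.sr_data matches the diff below, while the minimum-element constant and the example_* function names are placeholders.

/* Illustrative sketch only -- not the actual zfcp adapter setup code. */
#include <linux/mempool.h>
#include <linux/errno.h>

#define EXAMPLE_SR_POOL_MIN	16	/* placeholder for the real minimum element count */

static int example_create_sr_pool(struct zfcp_adapter *adapter)
{
	/*
	 * Order 0: every pool element is exactly one page, which is
	 * large enough for a 4096-byte status read buffer and by
	 * construction never crosses a page boundary.
	 */
	adapter->pool.sr_data = mempool_create_page_pool(EXAMPLE_SR_POOL_MIN, 0);
	if (!adapter->pool.sr_data)
		return -ENOMEM;
	return 0;
}

static void example_destroy_sr_pool(struct zfcp_adapter *adapter)
{
	mempool_destroy(adapter->pool.sr_data);
}

Compared with a kmem_cache of page-sized objects, the page pool needs no dedicated cache and directly guarantees the one-buffer-per-page placement the hardware interface requires.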
Diffstat (limited to 'drivers/s390/scsi/zfcp_fsf.c')
-rw-r--r-- | drivers/s390/scsi/zfcp_fsf.c | 12 |
1 file changed, 7 insertions, 5 deletions
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 6efaea9207c..a2b0e8435fc 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -212,7 +212,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
 
 	if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
 		zfcp_dbf_hba_fsf_uss("fssrh_1", req);
-		mempool_free(sr_buf, adapter->pool.status_read_data);
+		mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
 		zfcp_fsf_req_free(req);
 		return;
 	}
@@ -265,7 +265,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
 		break;
 	}
 
-	mempool_free(sr_buf, adapter->pool.status_read_data);
+	mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
 	zfcp_fsf_req_free(req);
 
 	atomic_inc(&adapter->stat_miss);
@@ -723,6 +723,7 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
 	struct zfcp_adapter *adapter = qdio->adapter;
 	struct zfcp_fsf_req *req;
 	struct fsf_status_read_buffer *sr_buf;
+	struct page *page;
 	int retval = -EIO;
 
 	spin_lock_irq(&qdio->req_q_lock);
@@ -736,11 +737,12 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
 		goto out;
 	}
 
-	sr_buf = mempool_alloc(adapter->pool.status_read_data, GFP_ATOMIC);
-	if (!sr_buf) {
+	page = mempool_alloc(adapter->pool.sr_data, GFP_ATOMIC);
+	if (!page) {
 		retval = -ENOMEM;
 		goto failed_buf;
 	}
+	sr_buf = page_address(page);
 	memset(sr_buf, 0, sizeof(*sr_buf));
 	req->data = sr_buf;
 
@@ -755,7 +757,7 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
 
 failed_req_send:
 	req->data = NULL;
-	mempool_free(sr_buf, adapter->pool.status_read_data);
+	mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
 failed_buf:
 	zfcp_dbf_hba_fsf_uss("fssr__1", req);
 	zfcp_fsf_req_free(req);
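One consequence visible in the hunks above is that each caller pairs the conversions itself: mempool_alloc() on a page pool hands back a struct page *, so the buffer pointer is obtained with page_address() after allocation, and the free path converts back with virt_to_page() before calling mempool_free(). The pair of hypothetical wrappers below is not part of this patch; it only illustrates how that pairing could be kept in one place.

/* Hypothetical helpers -- not in the patch; they restate the
 * page_address()/virt_to_page() pairing used in the hunks above. */
static struct fsf_status_read_buffer *
example_sr_buf_alloc(struct zfcp_adapter *adapter)
{
	struct page *page = mempool_alloc(adapter->pool.sr_data, GFP_ATOMIC);

	if (!page)
		return NULL;
	return page_address(page);	/* one page holds one status read buffer */
}

static void example_sr_buf_free(struct zfcp_adapter *adapter,
				struct fsf_status_read_buffer *sr_buf)
{
	mempool_free(virt_to_page(sr_buf), adapter->pool.sr_data);
}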