Diffstat (limited to 'fs/cifs/transport.c')
-rw-r--r-- | fs/cifs/transport.c | 290
1 files changed, 202 insertions, 88 deletions
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 83867ef348d..2126ab18504 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -27,6 +27,8 @@
 #include <linux/net.h>
 #include <linux/delay.h>
 #include <linux/freezer.h>
+#include <linux/tcp.h>
+#include <linux/highmem.h>
 #include <asm/uaccess.h>
 #include <asm/processor.h>
 #include <linux/mempool.h>
@@ -109,8 +111,8 @@ DeleteMidQEntry(struct mid_q_entry *midEntry)
         mempool_free(midEntry, cifs_mid_poolp);
 }
 
-static void
-delete_mid(struct mid_q_entry *mid)
+void
+cifs_delete_mid(struct mid_q_entry *mid)
 {
         spin_lock(&GlobalMid_Lock);
         list_del(&mid->qhead);
@@ -119,18 +121,29 @@ delete_mid(struct mid_q_entry *mid)
         DeleteMidQEntry(mid);
 }
 
+/*
+ * smb_send_kvec - send an array of kvecs to the server
+ * @server: Server to send the data to
+ * @iov: Pointer to array of kvecs
+ * @n_vec: length of kvec array
+ * @sent: amount of data sent on socket is stored here
+ *
+ * Our basic "send data to server" function. Should be called with srv_mutex
+ * held. The caller is responsible for handling the results.
+ */
 static int
-smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
+smb_send_kvec(struct TCP_Server_Info *server, struct kvec *iov, size_t n_vec,
+              size_t *sent)
 {
         int rc = 0;
         int i = 0;
         struct msghdr smb_msg;
-        unsigned int len = iov[0].iov_len;
-        unsigned int total_len;
-        int first_vec = 0;
-        unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
+        unsigned int remaining;
+        size_t first_vec = 0;
         struct socket *ssocket = server->ssocket;
 
+        *sent = 0;
+
         if (ssocket == NULL)
                 return -ENOTSOCK; /* BB eventually add reconnect code here */
 
@@ -143,56 +156,60 @@ smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
         else
                 smb_msg.msg_flags = MSG_NOSIGNAL;
 
-        total_len = 0;
+        remaining = 0;
         for (i = 0; i < n_vec; i++)
-                total_len += iov[i].iov_len;
-
-        cFYI(1, "Sending smb: total_len %d", total_len);
-        dump_smb(iov[0].iov_base, len);
+                remaining += iov[i].iov_len;
 
         i = 0;
-        while (total_len) {
+        while (remaining) {
+                /*
+                 * If blocking send, we try 3 times, since each can block
+                 * for 5 seconds. For nonblocking we have to try more
+                 * but wait increasing amounts of time allowing time for
+                 * socket to clear. The overall time we wait in either
+                 * case to send on the socket is about 15 seconds.
+                 * Similarly we wait for 15 seconds for a response from
+                 * the server in SendReceive[2] for the server to send
+                 * a response back for most types of requests (except
+                 * SMB Write past end of file which can be slow, and
+                 * blocking lock operations). NFS waits slightly longer
+                 * than CIFS, but this can make it take longer for
+                 * nonresponsive servers to be detected and 15 seconds
+                 * is more than enough time for modern networks to
+                 * send a packet. In most cases if we fail to send
+                 * after the retries we will kill the socket and
+                 * reconnect which may clear the network problem.
+                 */
                 rc = kernel_sendmsg(ssocket, &smb_msg, &iov[first_vec],
-                                    n_vec - first_vec, total_len);
-                if ((rc == -ENOSPC) || (rc == -EAGAIN)) {
+                                    n_vec - first_vec, remaining);
+                if (rc == -ENOSPC || rc == -EAGAIN) {
                         i++;
-                        /*
-                         * If blocking send we try 3 times, since each can block
-                         * for 5 seconds. For nonblocking we have to try more
-                         * but wait increasing amounts of time allowing time for
-                         * socket to clear. The overall time we wait in either
-                         * case to send on the socket is about 15 seconds.
-                         * Similarly we wait for 15 seconds for a response from
-                         * the server in SendReceive[2] for the server to send
-                         * a response back for most types of requests (except
-                         * SMB Write past end of file which can be slow, and
-                         * blocking lock operations). NFS waits slightly longer
-                         * than CIFS, but this can make it take longer for
-                         * nonresponsive servers to be detected and 15 seconds
-                         * is more than enough time for modern networks to
-                         * send a packet. In most cases if we fail to send
-                         * after the retries we will kill the socket and
-                         * reconnect which may clear the network problem.
-                         */
-                        if ((i >= 14) || (!server->noblocksnd && (i > 2))) {
-                                cERROR(1, "sends on sock %p stuck for 15 seconds",
-                                    ssocket);
+                        if (i >= 14 || (!server->noblocksnd && (i > 2))) {
+                                cERROR(1, "sends on sock %p stuck for 15 "
+                                          "seconds", ssocket);
                                 rc = -EAGAIN;
                                 break;
                         }
                         msleep(1 << i);
                         continue;
                 }
+
                 if (rc < 0)
                         break;
 
-                if (rc == total_len) {
-                        total_len = 0;
+                /* send was at least partially successful */
+                *sent += rc;
+
+                if (rc == remaining) {
+                        remaining = 0;
                         break;
-                } else if (rc > total_len) {
-                        cERROR(1, "sent %d requested %d", rc, total_len);
+                }
+
+                if (rc > remaining) {
+                        cERROR(1, "sent %d requested %d", rc, remaining);
                         break;
                 }
+
                 if (rc == 0) {
                         /* should never happen, letting socket clear before
                            retrying is our only obvious option here */
@@ -200,7 +217,9 @@ smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
                         msleep(500);
                         continue;
                 }
-                total_len -= rc;
+
+                remaining -= rc;
+
                 /* the line below resets i */
                 for (i = first_vec; i < n_vec; i++) {
                         if (iov[i].iov_len) {
@@ -215,16 +234,97 @@ smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
                                 }
                         }
                 }
+                i = 0; /* in case we get ENOSPC on the next send */
+                rc = 0;
         }
+        return rc;
+}
+
+/**
+ * rqst_page_to_kvec - Turn a slot in the smb_rqst page array into a kvec
+ * @rqst: pointer to smb_rqst
+ * @idx: index into the array of the page
+ * @iov: pointer to struct kvec that will hold the result
+ *
+ * Helper function to convert a slot in the rqst->rq_pages array into a kvec.
+ * The page will be kmapped and the address placed into iov_base. The length
+ * will then be adjusted according to the ptailoff.
+ */
+void
+cifs_rqst_page_to_kvec(struct smb_rqst *rqst, unsigned int idx,
+                       struct kvec *iov)
+{
+        /*
+         * FIXME: We could avoid this kmap altogether if we used
+         * kernel_sendpage instead of kernel_sendmsg. That will only
+         * work if signing is disabled though as sendpage inlines the
+         * page directly into the fraglist. If userspace modifies the
+         * page after we calculate the signature, then the server will
+         * reject it and may break the connection. kernel_sendmsg does
+         * an extra copy of the data and avoids that issue.
+         */
+        iov->iov_base = kmap(rqst->rq_pages[idx]);
+
+        /* if last page, don't send beyond this offset into page */
+        if (idx == (rqst->rq_npages - 1))
+                iov->iov_len = rqst->rq_tailsz;
+        else
+                iov->iov_len = rqst->rq_pagesz;
+}
+
+static int
+smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
+{
+        int rc;
+        struct kvec *iov = rqst->rq_iov;
+        int n_vec = rqst->rq_nvec;
+        unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
+        unsigned int i;
+        size_t total_len = 0, sent;
+        struct socket *ssocket = server->ssocket;
+        int val = 1;
+
+        cFYI(1, "Sending smb: smb_len=%u", smb_buf_length);
+        dump_smb(iov[0].iov_base, iov[0].iov_len);
+
+        /* cork the socket */
+        kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
+                          (char *)&val, sizeof(val));
+
+        rc = smb_send_kvec(server, iov, n_vec, &sent);
+        if (rc < 0)
+                goto uncork;
+
+        total_len += sent;
+
+        /* now walk the page array and send each page in it */
+        for (i = 0; i < rqst->rq_npages; i++) {
+                struct kvec p_iov;
+
+                cifs_rqst_page_to_kvec(rqst, i, &p_iov);
+                rc = smb_send_kvec(server, &p_iov, 1, &sent);
+                kunmap(rqst->rq_pages[i]);
+                if (rc < 0)
+                        break;
+
+                total_len += sent;
+        }
+
+uncork:
+        /* uncork it */
+        val = 0;
+        kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
+                          (char *)&val, sizeof(val));
 
         if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
-                cFYI(1, "partial send (%d remaining), terminating session",
-                        total_len);
-                /* If we have only sent part of an SMB then the next SMB
-                   could be taken as the remainder of this one. We need
-                   to kill the socket so the server throws away the partial
-                   SMB */
+                cFYI(1, "partial send (wanted=%u sent=%zu): terminating "
+                     "session", smb_buf_length + 4, total_len);
+                /*
+                 * If we have only sent part of an SMB then the next SMB could
+                 * be taken as the remainder of this one. We need to kill the
+                 * socket so the server throws away the partial SMB
+                 */
                 server->tcpStatus = CifsNeedReconnect;
         }
@@ -236,6 +336,15 @@ smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
         return rc;
 }
 
+static int
+smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
+{
+        struct smb_rqst rqst = { .rq_iov = iov,
+                                 .rq_nvec = n_vec };
+
+        return smb_send_rqst(server, &rqst);
+}
+
 int
 smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
          unsigned int smb_buf_length)
@@ -345,12 +454,11 @@ wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
         return 0;
 }
 
-int
-cifs_setup_async_request(struct TCP_Server_Info *server, struct kvec *iov,
-                         unsigned int nvec, struct mid_q_entry **ret_mid)
+struct mid_q_entry *
+cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
 {
         int rc;
-        struct smb_hdr *hdr = (struct smb_hdr *)iov[0].iov_base;
+        struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
         struct mid_q_entry *mid;
 
         /* enable signing if server requires it */
@@ -359,16 +467,15 @@ cifs_setup_async_request(struct TCP_Server_Info *server, struct kvec *iov,
 
         mid = AllocMidQEntry(hdr, server);
         if (mid == NULL)
-                return -ENOMEM;
+                return ERR_PTR(-ENOMEM);
 
-        rc = cifs_sign_smbv(iov, nvec, server, &mid->sequence_number);
+        rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
         if (rc) {
                 DeleteMidQEntry(mid);
-                return rc;
+                return ERR_PTR(rc);
         }
 
-        *ret_mid = mid;
-        return 0;
+        return mid;
 }
 
 /*
@@ -376,9 +483,9 @@ cifs_setup_async_request(struct TCP_Server_Info *server, struct kvec *iov,
  * the result. Caller is responsible for dealing with timeouts.
  */
 int
-cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
-                unsigned int nvec, mid_receive_t *receive,
-                mid_callback_t *callback, void *cbdata, const int flags)
+cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
+                mid_receive_t *receive, mid_callback_t *callback,
+                void *cbdata, const int flags)
 {
         int rc, timeout, optype;
         struct mid_q_entry *mid;
@@ -391,12 +498,12 @@ cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
                 return rc;
 
         mutex_lock(&server->srv_mutex);
-        rc = server->ops->setup_async_request(server, iov, nvec, &mid);
-        if (rc) {
+        mid = server->ops->setup_async_request(server, rqst);
+        if (IS_ERR(mid)) {
                 mutex_unlock(&server->srv_mutex);
                 add_credits(server, 1, optype);
                 wake_up(&server->request_q);
-                return rc;
+                return PTR_ERR(mid);
         }
 
         mid->receive = receive;
@@ -411,7 +518,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
 
         cifs_in_send_inc(server);
-        rc = smb_sendv(server, iov, nvec);
+        rc = smb_send_rqst(server, rqst);
         cifs_in_send_dec(server);
         cifs_save_when_sent(mid);
         mutex_unlock(&server->srv_mutex);
 
@@ -419,7 +526,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
         if (rc == 0)
                 return 0;
 
-        delete_mid(mid);
+        cifs_delete_mid(mid);
         add_credits(server, 1, optype);
         wake_up(&server->request_q);
         return rc;
@@ -503,35 +610,40 @@ cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
         /* convert the length into a more usable form */
         if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
                 struct kvec iov;
+                int rc = 0;
+                struct smb_rqst rqst = { .rq_iov = &iov,
+                                         .rq_nvec = 1 };
 
                 iov.iov_base = mid->resp_buf;
                 iov.iov_len = len;
                 /* FIXME: add code to kill session */
-                if (cifs_verify_signature(&iov, 1, server,
-                                          mid->sequence_number + 1) != 0)
-                        cERROR(1, "Unexpected SMB signature");
+                rc = cifs_verify_signature(&rqst, server,
+                                           mid->sequence_number + 1);
+                if (rc)
+                        cERROR(1, "SMB signature verification returned error = "
+                               "%d", rc);
         }
 
         /* BB special case reconnect tid and uid here? */
         return map_smb_to_linux_error(mid->resp_buf, log_error);
 }
 
-int
-cifs_setup_request(struct cifs_ses *ses, struct kvec *iov,
-                   unsigned int nvec, struct mid_q_entry **ret_mid)
+struct mid_q_entry *
+cifs_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
 {
         int rc;
-        struct smb_hdr *hdr = (struct smb_hdr *)iov[0].iov_base;
+        struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
         struct mid_q_entry *mid;
 
         rc = allocate_mid(ses, hdr, &mid);
         if (rc)
-                return rc;
-        rc = cifs_sign_smbv(iov, nvec, ses->server, &mid->sequence_number);
-        if (rc)
-                delete_mid(mid);
-        *ret_mid = mid;
-        return rc;
+                return ERR_PTR(rc);
+        rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
+        if (rc) {
+                cifs_delete_mid(mid);
+                return ERR_PTR(rc);
+        }
+        return mid;
 }
 
 int
@@ -544,6 +656,8 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
         struct mid_q_entry *midQ;
         char *buf = iov[0].iov_base;
         unsigned int credits = 1;
+        struct smb_rqst rqst = { .rq_iov = iov,
+                                 .rq_nvec = n_vec };
 
         timeout = flags & CIFS_TIMEOUT_MASK;
         optype = flags & CIFS_OP_MASK;
@@ -581,13 +695,13 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
 
         mutex_lock(&ses->server->srv_mutex);
 
-        rc = ses->server->ops->setup_request(ses, iov, n_vec, &midQ);
-        if (rc) {
+        midQ = ses->server->ops->setup_request(ses, &rqst);
+        if (IS_ERR(midQ)) {
                 mutex_unlock(&ses->server->srv_mutex);
                 cifs_small_buf_release(buf);
                 /* Update # of requests on wire to server */
                 add_credits(ses->server, 1, optype);
-                return rc;
+                return PTR_ERR(midQ);
         }
 
         midQ->mid_state = MID_REQUEST_SUBMITTED;
@@ -649,11 +763,11 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
         rc = ses->server->ops->check_receive(midQ, ses->server,
                                              flags & CIFS_LOG_ERROR);
 
-        /* mark it so buf will not be freed by delete_mid */
+        /* mark it so buf will not be freed by cifs_delete_mid */
         if ((flags & CIFS_NO_RESP) == 0)
                 midQ->resp_buf = NULL;
 out:
-        delete_mid(midQ);
+        cifs_delete_mid(midQ);
         add_credits(ses->server, credits, optype);
 
         return rc;
@@ -759,7 +873,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
         memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
         rc = cifs_check_receive(midQ, ses->server, 0);
 out:
-        delete_mid(midQ);
+        cifs_delete_mid(midQ);
         add_credits(ses->server, 1, 0);
 
         return rc;
@@ -843,7 +957,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
 
         rc = cifs_sign_smb(in_buf, ses->server, &midQ->sequence_number);
         if (rc) {
-                delete_mid(midQ);
+                cifs_delete_mid(midQ);
                 mutex_unlock(&ses->server->srv_mutex);
                 return rc;
         }
@@ -856,7 +970,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
         mutex_unlock(&ses->server->srv_mutex);
 
         if (rc < 0) {
-                delete_mid(midQ);
+                cifs_delete_mid(midQ);
                 return rc;
         }
 
@@ -877,7 +991,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
                    blocking lock to return. */
                 rc = send_cancel(ses->server, in_buf, midQ);
                 if (rc) {
-                        delete_mid(midQ);
+                        cifs_delete_mid(midQ);
                         return rc;
                 }
         } else {
@@ -889,7 +1003,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
                 /* If we get -ENOLCK back the lock may have already been
                    removed. Don't exit in this case. */
                 if (rc && rc != -ENOLCK) {
-                        delete_mid(midQ);
+                        cifs_delete_mid(midQ);
                         return rc;
                 }
         }
@@ -926,7 +1040,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
         memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
         rc = cifs_check_receive(midQ, ses->server, 0);
 out:
-        delete_mid(midQ);
+        cifs_delete_mid(midQ);
         if (rstart && rc == -EACCES)
                 return -ERESTARTSYS;
         return rc;
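Note on the send path added above: smb_send_rqst() brackets the whole transfer with TCP_CORK so that the RFC1002 length header, the kvec array and each data page, all pushed to the socket as separate sends, still leave the host as full-sized TCP segments. Below is a minimal userspace sketch of the same cork/send/uncork ordering, not the kernel code from the patch; send_rqst_like() and its parameters are invented for illustration, and the short-write retry logic that smb_send_kvec() performs is omitted here.

/*
 * Sketch only: cork, push the header iovecs and the data buffers as
 * separate writes, then uncork so the stack flushes what is queued.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

static ssize_t send_rqst_like(int sock, struct iovec *hdr_iov, int n_hdr,
                              struct iovec *bufs, int n_bufs)
{
        int val = 1;
        ssize_t total = 0, rc;
        int i;

        /* cork the socket so the pieces below coalesce into full frames */
        setsockopt(sock, IPPROTO_TCP, TCP_CORK, &val, sizeof(val));

        rc = writev(sock, hdr_iov, n_hdr);      /* header kvec array */
        if (rc < 0)
                goto uncork;
        total += rc;

        for (i = 0; i < n_bufs; i++) {          /* one "page" at a time */
                rc = writev(sock, &bufs[i], 1);
                if (rc < 0)
                        break;
                total += rc;
        }

uncork:
        /* uncork: let the stack transmit whatever is still queued */
        val = 0;
        setsockopt(sock, IPPROTO_TCP, TCP_CORK, &val, sizeof(val));
        return rc < 0 ? rc : total;
}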
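cifs_rqst_page_to_kvec() maps every slot of rq_pages for a full rq_pagesz bytes except the last one, which is clamped to rq_tailsz. The standalone sketch below shows how a byte count breaks down into a page count and a tail length under that convention; EXAMPLE_PAGESZ and page_array_lens() are made-up names for illustration and assume the pages are filled contiguously from offset zero.

#include <stdio.h>

#define EXAMPLE_PAGESZ 4096u    /* stand-in for rq_pagesz */

/* compute how many page slots a payload needs and how full the last one is */
static void page_array_lens(unsigned int total, unsigned int *npages,
                            unsigned int *tailsz)
{
        *npages = (total + EXAMPLE_PAGESZ - 1) / EXAMPLE_PAGESZ;
        *tailsz = total % EXAMPLE_PAGESZ ? total % EXAMPLE_PAGESZ
                                         : EXAMPLE_PAGESZ;
}

int main(void)
{
        unsigned int npages, tailsz;

        page_array_lens(10000, &npages, &tailsz); /* 3 pages, tail of 1808 */
        printf("npages=%u tailsz=%u\n", npages, tailsz);
        return 0;
}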
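The setup_request and setup_async_request operations now hand back a struct mid_q_entry pointer directly, encoding failure as ERR_PTR(-errno) so callers test with IS_ERR() and decode with PTR_ERR() instead of filling a struct mid_q_entry ** out-parameter. The sketch below demonstrates that idiom as plain userspace C; in the kernel these helpers come from <linux/err.h>, the local macro copies exist only so the example builds on its own, and make_mid() is an invented stand-in rather than a function from this patch.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* userspace copies of the <linux/err.h> helpers, for demonstration only */
#define MAX_ERRNO       4095
#define ERR_PTR(err)    ((void *)(long)(err))
#define PTR_ERR(ptr)    ((long)(ptr))
#define IS_ERR(ptr)     ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

struct mid { int id; };

/* either returns a valid object or a negative errno encoded in the pointer */
static struct mid *make_mid(int fail)
{
        struct mid *m;

        if (fail)
                return ERR_PTR(-ENOMEM);
        m = malloc(sizeof(*m));
        if (!m)
                return ERR_PTR(-ENOMEM);
        m->id = 1;
        return m;
}

int main(void)
{
        struct mid *m = make_mid(1);

        if (IS_ERR(m))                  /* caller tests, then decodes */
                printf("error: %ld\n", PTR_ERR(m));
        else
                free(m);
        return 0;
}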