2007-09-11 01:50:12 +08:00
|
|
|
/*
|
2007-09-11 01:50:42 +08:00
|
|
|
* Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
|
|
|
|
*
|
|
|
|
* This software is available to you under a choice of one of two
|
|
|
|
* licenses. You may choose to be licensed under the terms of the GNU
|
|
|
|
* General Public License (GPL) Version 2, available from the file
|
|
|
|
* COPYING in the main directory of this source tree, or the BSD-type
|
|
|
|
* license below:
|
|
|
|
*
|
|
|
|
* Redistribution and use in source and binary forms, with or without
|
|
|
|
* modification, are permitted provided that the following conditions
|
|
|
|
* are met:
|
|
|
|
*
|
|
|
|
* Redistributions of source code must retain the above copyright
|
|
|
|
* notice, this list of conditions and the following disclaimer.
|
|
|
|
*
|
|
|
|
* Redistributions in binary form must reproduce the above
|
|
|
|
* copyright notice, this list of conditions and the following
|
|
|
|
* disclaimer in the documentation and/or other materials provided
|
|
|
|
* with the distribution.
|
|
|
|
*
|
|
|
|
* Neither the name of the Network Appliance, Inc. nor the names of
|
|
|
|
* its contributors may be used to endorse or promote products
|
|
|
|
* derived from this software without specific prior written
|
|
|
|
* permission.
|
|
|
|
*
|
|
|
|
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
|
|
|
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
|
|
|
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
|
|
|
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
|
|
|
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
|
|
|
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
|
|
|
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
|
|
|
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
|
|
|
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
|
|
|
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
|
|
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* rpc_rdma.c
|
|
|
|
*
|
|
|
|
* This file contains the guts of the RPC RDMA protocol, and
|
|
|
|
* does marshaling/unmarshaling, etc. It is also where interfacing
|
|
|
|
* to the Linux RPC framework lives.
|
2007-09-11 01:50:12 +08:00
|
|
|
*/
|
|
|
|
|
|
|
|
#include "xprt_rdma.h"
|
|
|
|
|
2007-09-11 01:50:42 +08:00
|
|
|
#include <linux/highmem.h>
|
|
|
|
|
2014-11-18 05:58:04 +08:00
|
|
|
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
|
2007-09-11 01:50:42 +08:00
|
|
|
# define RPCDBG_FACILITY RPCDBG_TRANS
|
|
|
|
#endif
|
|
|
|
|
2015-03-31 02:33:53 +08:00
|
|
|
/* Data-transfer mode chosen for an RPC/RDMA call or reply body.
 * Selected in rpcrdma_marshal_req() and used to drive chunk-list
 * encoding in rpcrdma_create_chunks().
 */
enum rpcrdma_chunktype {
	rpcrdma_noch = 0,	/* entire message sent inline, no chunks */
	rpcrdma_readch,		/* data payload via read chunk(s), header inline */
	rpcrdma_areadch,	/* entire call message via a position-0 read chunk */
	rpcrdma_writech,	/* result payload via write chunk(s) */
	rpcrdma_replych		/* entire reply via a reply chunk */
};
|
|
|
|
|
2014-11-18 05:58:04 +08:00
|
|
|
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
/* Human-readable names for enum rpcrdma_chunktype, used only by
 * dprintk() in rpcrdma_marshal_req(). Indexed by chunk type, so the
 * order must match the enum above.
 */
static const char transfertypes[][12] = {
	"pure inline",	/* no chunks */
	" read chunk",	/* some argument via rdma read */
	"*read chunk",	/* entire request via rdma read */
	"write chunk",	/* some result via rdma write */
	"reply chunk"	/* entire reply via rdma write */
};
#endif
|
|
|
|
|
2015-08-04 01:03:49 +08:00
|
|
|
/* The client can send a request inline as long as the RPCRDMA header
|
|
|
|
* plus the RPC call fit under the transport's inline limit. If the
|
|
|
|
* combined call message size exceeds that limit, the client must use
|
|
|
|
* the read chunk list for this operation.
|
|
|
|
*/
|
|
|
|
static bool rpcrdma_args_inline(struct rpc_rqst *rqst)
|
|
|
|
{
|
|
|
|
unsigned int callsize = RPCRDMA_HDRLEN_MIN + rqst->rq_snd_buf.len;
|
|
|
|
|
|
|
|
return callsize <= RPCRDMA_INLINE_WRITE_THRESHOLD(rqst);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* The client can't know how large the actual reply will be. Thus it
|
|
|
|
* plans for the largest possible reply for that particular ULP
|
|
|
|
* operation. If the maximum combined reply message size exceeds that
|
|
|
|
* limit, the client must provide a write list or a reply chunk for
|
|
|
|
* this request.
|
|
|
|
*/
|
|
|
|
static bool rpcrdma_results_inline(struct rpc_rqst *rqst)
|
|
|
|
{
|
|
|
|
unsigned int repsize = RPCRDMA_HDRLEN_MIN + rqst->rq_rcv_buf.buflen;
|
|
|
|
|
|
|
|
return repsize <= RPCRDMA_INLINE_READ_THRESHOLD(rqst);
|
|
|
|
}
|
|
|
|
|
2015-08-04 01:04:17 +08:00
|
|
|
static int
|
|
|
|
rpcrdma_tail_pullup(struct xdr_buf *buf)
|
|
|
|
{
|
|
|
|
size_t tlen = buf->tail[0].iov_len;
|
|
|
|
size_t skip = tlen & 3;
|
|
|
|
|
|
|
|
/* Do not include the tail if it is only an XDR pad */
|
|
|
|
if (tlen < 4)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* xdr_write_pages() adds a pad at the beginning of the tail
|
|
|
|
* if the content in "buf->pages" is unaligned. Force the
|
|
|
|
* tail's actual content to land at the next XDR position
|
|
|
|
* after the head instead.
|
|
|
|
*/
|
|
|
|
if (skip) {
|
|
|
|
unsigned char *src, *dst;
|
|
|
|
unsigned int count;
|
|
|
|
|
|
|
|
src = buf->tail[0].iov_base;
|
|
|
|
dst = buf->head[0].iov_base;
|
|
|
|
dst += buf->head[0].iov_len;
|
|
|
|
|
|
|
|
src += skip;
|
|
|
|
tlen -= skip;
|
|
|
|
|
|
|
|
dprintk("RPC: %s: skip=%zu, memmove(%p, %p, %zu)\n",
|
|
|
|
__func__, skip, dst, src, tlen);
|
|
|
|
|
|
|
|
for (count = tlen; count; count--)
|
|
|
|
*dst++ = *src++;
|
|
|
|
}
|
|
|
|
|
|
|
|
return tlen;
|
|
|
|
}
|
|
|
|
|
xprtrdma: Segment head and tail XDR buffers on page boundaries
A single memory allocation is used for the pair of buffers wherein
the RPC client builds an RPC call message and decodes its matching
reply. These buffers are sized based on the maximum possible size
of the RPC call and reply messages for the operation in progress.
This means that as the call buffer increases in size, the start of
the reply buffer is pushed farther into the memory allocation.
RPC requests are growing in size. It used to be that both the call
and reply buffers fit inside a single page.
But these days, thanks to NFSv4 (and especially security labels in
NFSv4.2) the maximum call and reply sizes are large. NFSv4.0 OPEN,
for example, now requires a 6KB allocation for a pair of call and
reply buffers, and NFSv4 LOOKUP is not far behind.
As the maximum size of a call increases, the reply buffer is pushed
far enough into the buffer's memory allocation that a page boundary
can appear in the middle of it.
When the maximum possible reply size is larger than the client's
RDMA receive buffers (currently 1KB), the client has to register a
Reply chunk for the server to RDMA Write the reply into.
The logic in rpcrdma_convert_iovs() assumes that xdr_buf head and
tail buffers would always be contained on a single page. It supplies
just one segment for the head and one for the tail.
FMR, for example, registers up to a page boundary (only a portion of
the reply buffer in the OPEN case above). But without additional
segments, it doesn't register the rest of the buffer.
When the server tries to write the OPEN reply, the RDMA Write fails
with a remote access error since the client registered only part of
the Reply chunk.
rpcrdma_convert_iovs() must split the XDR buffer into multiple
segments, each of which are guaranteed not to contain a page
boundary. That way fmr_op_map is given the proper number of segments
to register the whole reply buffer.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Devesh Sharma <devesh.sharma@broadcom.com>
Reviewed-by: Sagi Grimberg <sagig@mellanox.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
2016-03-05 00:27:52 +08:00
|
|
|
/* Split "vec" on page boundaries into segments. FMR registers pages,
|
|
|
|
* not a byte range. Other modes coalesce these segments into a single
|
|
|
|
* MR when they can.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg,
|
|
|
|
int n, int nsegs)
|
|
|
|
{
|
|
|
|
size_t page_offset;
|
|
|
|
u32 remaining;
|
|
|
|
char *base;
|
|
|
|
|
|
|
|
base = vec->iov_base;
|
|
|
|
page_offset = offset_in_page(base);
|
|
|
|
remaining = vec->iov_len;
|
|
|
|
while (remaining && n < nsegs) {
|
|
|
|
seg[n].mr_page = NULL;
|
|
|
|
seg[n].mr_offset = base;
|
|
|
|
seg[n].mr_len = min_t(u32, PAGE_SIZE - page_offset, remaining);
|
|
|
|
remaining -= seg[n].mr_len;
|
|
|
|
base += seg[n].mr_len;
|
|
|
|
++n;
|
|
|
|
page_offset = 0;
|
|
|
|
}
|
|
|
|
return n;
|
|
|
|
}
|
|
|
|
|
2007-09-11 01:50:42 +08:00
|
|
|
/*
 * Chunk assembly from upper layer xdr_buf.
 *
 * Prepare the passed-in xdr_buf into representation as RPC/RDMA chunk
 * elements. Segments are then coalesced when registered, if possible
 * within the selected memreg mode.
 *
 * "pos" is the byte offset of the payload within the RPC message;
 * pos == 0 means the entire message (head included) is being moved.
 *
 * Returns positive number of segments converted, or a negative errno.
 */

static int
rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos,
	enum rpcrdma_chunktype type, struct rpcrdma_mr_seg *seg, int nsegs)
{
	int len, n = 0, p;
	int page_base;
	struct page **ppages;

	/* pos == 0: the head kvec itself is part of the payload */
	if (pos == 0) {
		n = rpcrdma_convert_kvec(&xdrbuf->head[0], seg, n, nsegs);
		if (n == nsegs)
			return -EIO;
	}

	len = xdrbuf->page_len;
	ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
	page_base = xdrbuf->page_base & ~PAGE_MASK;
	p = 0;
	/* one segment per page (or partial first/last page) */
	while (len && n < nsegs) {
		if (!ppages[p]) {
			/* alloc the pagelist for receiving buffer */
			ppages[p] = alloc_page(GFP_ATOMIC);
			if (!ppages[p])
				return -ENOMEM;
		}
		seg[n].mr_page = ppages[p];
		seg[n].mr_offset = (void *)(unsigned long) page_base;
		seg[n].mr_len = min_t(u32, PAGE_SIZE - page_base, len);
		/* sanity check: a segment must never span a page */
		if (seg[n].mr_len > PAGE_SIZE)
			return -EIO;
		len -= seg[n].mr_len;
		++n;
		++p;
		page_base = 0;	/* page offset only applies to first page */
	}

	/* Message overflows the seg array */
	if (len && n == nsegs)
		return -EIO;

	/* When encoding the read list, the tail is always sent inline */
	if (type == rpcrdma_readch)
		return n;

	if (xdrbuf->tail[0].iov_len) {
		/* the rpcrdma protocol allows us to omit any trailing
		 * xdr pad bytes, saving the server an RDMA operation. */
		if (xdrbuf->tail[0].iov_len < 4 && xprt_rdma_pad_optimize)
			return n;
		n = rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, n, nsegs);
		if (n == nsegs)
			return -EIO;
	}

	return n;
}
|
|
|
|
|
|
|
|
/*
 * Create read/write chunk lists, and reply chunks, for RDMA
 *
 * Assume check against THRESHOLD has been done, and chunks are required.
 * Assume only encoding one list entry for read|write chunks. The NFSv3
 * protocol is simple enough to allow this as it only has a single "bulk
 * result" in each procedure - complicated NFSv4 COMPOUNDs are not. (The
 * RDMA/Sessions NFSv4 proposal addresses this for future v4 revs.)
 *
 * When used for a single reply chunk (which is a special write
 * chunk used for the entire reply, rather than just the data), it
 * is used primarily for READDIR and READLINK which would otherwise
 * be severely size-limited by a small rdma inline read max. The server
 * response will come back as an RDMA Write, followed by a message
 * of type RDMA_NOMSG carrying the xid and length. As a result, reply
 * chunks do not provide data alignment, however they do not require
 * "fixup" (moving the response to the upper layer buffer) either.
 *
 * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
 *
 * Read chunklist (a linked list):
 *  N elements, position P (same P for all chunks of same arg!):
 *   1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
 *
 * Write chunklist (a list of (one) counted array):
 *  N elements:
 *   1 - N - HLOO - HLOO - ... - HLOO - 0
 *
 * Reply chunk (a counted array):
 *  N elements:
 *   1 - N - HLOO - HLOO - ... - HLOO
 *
 * Returns positive RPC/RDMA header size, or negative errno.
 */

static ssize_t
rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
		struct rpcrdma_msg *headerp, enum rpcrdma_chunktype type)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	int n, nsegs, nchunks = 0;
	unsigned int pos;
	struct rpcrdma_mr_seg *seg = req->rl_segments;
	struct rpcrdma_read_chunk *cur_rchunk = NULL;
	struct rpcrdma_write_array *warray = NULL;
	struct rpcrdma_write_chunk *cur_wchunk = NULL;
	__be32 *iptr = headerp->rm_body.rm_chunks;
	int (*map)(struct rpcrdma_xprt *, struct rpcrdma_mr_seg *, int, bool);

	if (type == rpcrdma_readch || type == rpcrdma_areadch) {
		/* a read chunk - server will RDMA Read our memory */
		cur_rchunk = (struct rpcrdma_read_chunk *) iptr;
	} else {
		/* a write or reply chunk - server will RDMA Write our memory */
		*iptr++ = xdr_zero;	/* encode a NULL read chunk list */
		if (type == rpcrdma_replych)
			*iptr++ = xdr_zero;	/* a NULL write chunk list */
		warray = (struct rpcrdma_write_array *) iptr;
		cur_wchunk = (struct rpcrdma_write_chunk *) (warray + 1);
	}

	/* reply and whole-message read chunks cover the full message;
	 * otherwise the payload starts after the head kvec */
	if (type == rpcrdma_replych || type == rpcrdma_areadch)
		pos = 0;
	else
		pos = target->head[0].iov_len;

	nsegs = rpcrdma_convert_iovs(target, pos, type, seg, RPCRDMA_MAX_SEGS);
	if (nsegs < 0)
		return nsegs;

	/* register each run of segments and encode one chunk per MR */
	map = r_xprt->rx_ia.ri_ops->ro_map;
	do {
		n = map(r_xprt, seg, nsegs, cur_wchunk != NULL);
		if (n <= 0)
			goto out;
		if (cur_rchunk) {	/* read */
			cur_rchunk->rc_discrim = xdr_one;
			/* all read chunks have the same "position" */
			cur_rchunk->rc_position = cpu_to_be32(pos);
			cur_rchunk->rc_target.rs_handle =
						cpu_to_be32(seg->mr_rkey);
			cur_rchunk->rc_target.rs_length =
						cpu_to_be32(seg->mr_len);
			xdr_encode_hyper(
					(__be32 *)&cur_rchunk->rc_target.rs_offset,
					seg->mr_base);
			dprintk("RPC: %s: read chunk "
				"elem %d@0x%llx:0x%x pos %u (%s)\n", __func__,
				seg->mr_len, (unsigned long long)seg->mr_base,
				seg->mr_rkey, pos, n < nsegs ? "more" : "last");
			cur_rchunk++;
			r_xprt->rx_stats.read_chunk_count++;
		} else {		/* write/reply */
			cur_wchunk->wc_target.rs_handle =
						cpu_to_be32(seg->mr_rkey);
			cur_wchunk->wc_target.rs_length =
						cpu_to_be32(seg->mr_len);
			xdr_encode_hyper(
					(__be32 *)&cur_wchunk->wc_target.rs_offset,
					seg->mr_base);
			dprintk("RPC: %s: %s chunk "
				"elem %d@0x%llx:0x%x (%s)\n", __func__,
				(type == rpcrdma_replych) ? "reply" : "write",
				seg->mr_len, (unsigned long long)seg->mr_base,
				seg->mr_rkey, n < nsegs ? "more" : "last");
			cur_wchunk++;
			if (type == rpcrdma_replych)
				r_xprt->rx_stats.reply_chunk_count++;
			else
				r_xprt->rx_stats.write_chunk_count++;
			r_xprt->rx_stats.total_rdma_request += seg->mr_len;
		}
		nchunks++;
		seg += n;
		nsegs -= n;
	} while (nsegs);

	/* success. all failures return above */
	req->rl_nchunks = nchunks;

	/*
	 * finish off header. If write, marshal discrim and nchunks.
	 */
	if (cur_rchunk) {
		iptr = (__be32 *) cur_rchunk;
		*iptr++ = xdr_zero;	/* finish the read chunk list */
		*iptr++ = xdr_zero;	/* encode a NULL write chunk list */
		*iptr++ = xdr_zero;	/* encode a NULL reply chunk */
	} else {
		warray->wc_discrim = xdr_one;
		warray->wc_nchunks = cpu_to_be32(nchunks);
		iptr = (__be32 *) cur_wchunk;
		if (type == rpcrdma_writech) {
			*iptr++ = xdr_zero; /* finish the write chunk list */
			*iptr++ = xdr_zero; /* encode a NULL reply chunk */
		}
	}

	/*
	 * Return header size.
	 */
	return (unsigned char *)iptr - (unsigned char *)headerp;

out:
	/* unwind: unmap every segment registered so far */
	for (pos = 0; nchunks--;)
		pos += r_xprt->rx_ia.ri_ops->ro_unmap(r_xprt,
						&req->rl_segments[pos]);
	return n;
}
|
|
|
|
|
|
|
|
/*
 * Copy write data inline.
 * This function is used for "small" requests. Data which is passed
 * to RPC via iovecs (or page list) is copied directly into the
 * pre-registered memory buffer for this request. For small amounts
 * of data, this is efficient. The cutoff value is tunable.
 */
static void rpcrdma_inline_pullup(struct rpc_rqst *rqst)
{
	int i, npages, curlen;
	int copy_len;
	unsigned char *srcp, *destp;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
	int page_base;
	struct page **ppages;

	/* destination: just past the head kvec of the send buffer */
	destp = rqst->rq_svec[0].iov_base;
	curlen = rqst->rq_svec[0].iov_len;
	destp += curlen;

	dprintk("RPC: %s: destp 0x%p len %d hdrlen %d\n",
		__func__, destp, rqst->rq_slen, curlen);

	copy_len = rqst->rq_snd_buf.page_len;

	/* Move the tail first, to its final position past the page data,
	 * before the page data overwrites its current location. */
	if (rqst->rq_snd_buf.tail[0].iov_len) {
		curlen = rqst->rq_snd_buf.tail[0].iov_len;
		if (destp + copy_len != rqst->rq_snd_buf.tail[0].iov_base) {
			memmove(destp + copy_len,
				rqst->rq_snd_buf.tail[0].iov_base, curlen);
			r_xprt->rx_stats.pullup_copy_count += curlen;
		}
		dprintk("RPC: %s: tail destp 0x%p len %d\n",
			__func__, destp + copy_len, curlen);
		rqst->rq_svec[0].iov_len += curlen;
	}
	r_xprt->rx_stats.pullup_copy_count += copy_len;

	/* now copy the page list contents, page by page */
	page_base = rqst->rq_snd_buf.page_base;
	ppages = rqst->rq_snd_buf.pages + (page_base >> PAGE_SHIFT);
	page_base &= ~PAGE_MASK;
	npages = PAGE_ALIGN(page_base+copy_len) >> PAGE_SHIFT;
	for (i = 0; copy_len && i < npages; i++) {
		curlen = PAGE_SIZE - page_base;
		if (curlen > copy_len)
			curlen = copy_len;
		dprintk("RPC: %s: page %d destp 0x%p len %d curlen %d\n",
			__func__, i, destp, copy_len, curlen);
		srcp = kmap_atomic(ppages[i]);
		memcpy(destp, srcp+page_base, curlen);
		kunmap_atomic(srcp);
		rqst->rq_svec[0].iov_len += curlen;
		destp += curlen;
		copy_len -= curlen;
		page_base = 0;	/* offset applies only to the first page */
	}
	/* header now contains entire send message */
}
|
|
|
|
|
|
|
|
/*
 * Marshal a request: the primary job of this routine is to choose
 * the transfer modes. See comments below.
 *
 * Uses multiple RDMA IOVs for a request:
 *  [0] -- RPC RDMA header, which uses memory from the *start* of the
 *         preregistered buffer that already holds the RPC data in
 *         its middle.
 *  [1] -- the RPC header/data, marshaled by RPC and the NFS protocol.
 *  [2] -- optional padding.
 *  [3] -- if padded, header only in [1] and data here.
 *
 * Returns zero on success, otherwise a negative errno.
 */

int
rpcrdma_marshal_req(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	char *base;
	size_t rpclen;
	ssize_t hdrlen;
	enum rpcrdma_chunktype rtype, wtype;
	struct rpcrdma_msg *headerp;

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	/* backchannel replies take a separate marshaling path */
	if (test_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state))
		return rpcrdma_bc_marshal_reply(rqst);
#endif

	/*
	 * rpclen gets amount of data in first buffer, which is the
	 * pre-registered buffer.
	 */
	base = rqst->rq_svec[0].iov_base;
	rpclen = rqst->rq_svec[0].iov_len;

	headerp = rdmab_to_msg(req->rl_rdmabuf);
	/* don't byte-swap XID, it's already done in request */
	headerp->rm_xid = rqst->rq_xid;
	headerp->rm_vers = rpcrdma_version;
	headerp->rm_credit = cpu_to_be32(r_xprt->rx_buf.rb_max_requests);
	headerp->rm_type = rdma_msg;

	/*
	 * Chunks needed for results?
	 *
	 * o Read ops return data as write chunk(s), header as inline.
	 * o If the expected result is under the inline threshold, all ops
	 *   return as inline.
	 * o Large non-read ops return as a single reply chunk.
	 */
	if (rqst->rq_rcv_buf.flags & XDRBUF_READ)
		wtype = rpcrdma_writech;
	else if (rpcrdma_results_inline(rqst))
		wtype = rpcrdma_noch;
	else
		wtype = rpcrdma_replych;

	/*
	 * Chunks needed for arguments?
	 *
	 * o If the total request is under the inline threshold, all ops
	 *   are sent as inline.
	 * o Large write ops transmit data as read chunk(s), header as
	 *   inline.
	 * o Large non-write ops are sent with the entire message as a
	 *   single read chunk (protocol 0-position special case).
	 *
	 * This assumes that the upper layer does not present a request
	 * that both has a data payload, and whose non-data arguments
	 * by themselves are larger than the inline threshold.
	 */
	if (rpcrdma_args_inline(rqst)) {
		rtype = rpcrdma_noch;
	} else if (rqst->rq_snd_buf.flags & XDRBUF_WRITE) {
		rtype = rpcrdma_readch;
	} else {
		r_xprt->rx_stats.nomsg_call_count++;
		headerp->rm_type = htonl(RDMA_NOMSG);
		rtype = rpcrdma_areadch;
		rpclen = 0;	/* whole message moves via the read chunk */
	}

	/* The following simplification is not true forever */
	if (rtype != rpcrdma_noch && wtype == rpcrdma_replych)
		wtype = rpcrdma_noch;
	if (rtype != rpcrdma_noch && wtype != rpcrdma_noch) {
		dprintk("RPC: %s: cannot marshal multiple chunk lists\n",
			__func__);
		return -EIO;
	}

	hdrlen = RPCRDMA_HDRLEN_MIN;

	/*
	 * Pull up any extra send data into the preregistered buffer.
	 * When padding is in use and applies to the transfer, insert
	 * it and change the message type.
	 */
	if (rtype == rpcrdma_noch) {

		rpcrdma_inline_pullup(rqst);

		/* inline: all three chunk lists are empty */
		headerp->rm_body.rm_nochunks.rm_empty[0] = xdr_zero;
		headerp->rm_body.rm_nochunks.rm_empty[1] = xdr_zero;
		headerp->rm_body.rm_nochunks.rm_empty[2] = xdr_zero;
		/* new length after pullup */
		rpclen = rqst->rq_svec[0].iov_len;
	} else if (rtype == rpcrdma_readch)
		rpclen += rpcrdma_tail_pullup(&rqst->rq_snd_buf);
	if (rtype != rpcrdma_noch) {
		hdrlen = rpcrdma_create_chunks(rqst, &rqst->rq_snd_buf,
					       headerp, rtype);
		wtype = rtype;	/* simplify dprintk */

	} else if (wtype != rpcrdma_noch) {
		hdrlen = rpcrdma_create_chunks(rqst, &rqst->rq_rcv_buf,
					       headerp, wtype);
	}
	if (hdrlen < 0)
		return hdrlen;

	dprintk("RPC: %s: %s: hdrlen %zd rpclen %zd"
		" headerp 0x%p base 0x%p lkey 0x%x\n",
		__func__, transfertypes[wtype], hdrlen, rpclen,
		headerp, base, rdmab_lkey(req->rl_rdmabuf));

	/*
	 * initialize send_iov's - normally only two: rdma chunk header and
	 * single preregistered RPC header buffer, but if padding is present,
	 * then use a preregistered (and zeroed) pad buffer between the RPC
	 * header and any write data. In all non-rdma cases, any following
	 * data has been copied into the RPC header buffer.
	 */
	req->rl_send_iov[0].addr = rdmab_addr(req->rl_rdmabuf);
	req->rl_send_iov[0].length = hdrlen;
	req->rl_send_iov[0].lkey = rdmab_lkey(req->rl_rdmabuf);

	/* RDMA_NOMSG: the header is the entire send; no RPC payload iov */
	req->rl_niovs = 1;
	if (rtype == rpcrdma_areadch)
		return 0;

	req->rl_send_iov[1].addr = rdmab_addr(req->rl_sendbuf);
	req->rl_send_iov[1].length = rpclen;
	req->rl_send_iov[1].lkey = rdmab_lkey(req->rl_sendbuf);

	req->rl_niovs = 2;
	return 0;
}
|
|
|
|
|
|
|
|
/*
 * Chase down a received write or reply chunklist to get length
 * RDMA'd by server. See map at rpcrdma_create_chunks()! :-)
 *
 * "max" bounds the number of chunk elements accepted; "wrchunk" is
 * non-zero when a write chunk list (which carries a terminating
 * discriminator) is being walked. On success, *iptrp is advanced
 * past the decoded list and the total RDMA'd byte count is
 * returned; -1 indicates a malformed list.
 */
static int
rpcrdma_count_chunks(struct rpcrdma_rep *rep, unsigned int max, int wrchunk, __be32 **iptrp)
{
	unsigned int i, total_len;
	struct rpcrdma_write_chunk *cur_wchunk;
	char *base = (char *)rdmab_to_msg(rep->rr_rdmabuf);

	/* first word is the element count */
	i = be32_to_cpu(**iptrp);
	if (i > max)
		return -1;
	cur_wchunk = (struct rpcrdma_write_chunk *) (*iptrp + 1);
	total_len = 0;
	while (i--) {
		struct rpcrdma_segment *seg = &cur_wchunk->wc_target;
		ifdebug(FACILITY) {
			u64 off;
			xdr_decode_hyper((__be32 *)&seg->rs_offset, &off);
			dprintk("RPC: %s: chunk %d@0x%llx:0x%x\n",
				__func__,
				be32_to_cpu(seg->rs_length),
				(unsigned long long)off,
				be32_to_cpu(seg->rs_handle));
		}
		total_len += be32_to_cpu(seg->rs_length);
		++cur_wchunk;
	}
	/* check and adjust for properly terminated write chunk */
	if (wrchunk) {
		__be32 *w = (__be32 *) cur_wchunk;
		if (*w++ != xdr_zero)
			return -1;
		cur_wchunk = (struct rpcrdma_write_chunk *) w;
	}
	/* guard against a list that runs past the received message */
	if ((char *)cur_wchunk > base + rep->rr_len)
		return -1;

	*iptrp = (__be32 *) cur_wchunk;
	return total_len;
}
|
|
|
|
|
|
|
|
/*
 * Scatter inline received data back into provided iov's.
 *
 * @rqst:     RPC request whose receive xdr_buf is being populated
 * @srcp:     start of the inline RPC message payload in the receive buffer
 * @copy_len: number of payload bytes available at @srcp
 * @pad:      implicit XDR padding to append after the terminal chunk
 *
 * The head iov is redirected to point directly into the receive buffer
 * (no copy); page and tail data are copied out. Bytes that do not fit
 * in the provided iovs are dropped (logged via dprintk).
 */
static void
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
	int i, npages, curlen, olen;
	char *destp;
	struct page **ppages;
	int page_base;

	/* Head segment: shorten it if the reply is smaller than the
	 * receive buffer's head (write chunk header fixup).
	 */
	curlen = rqst->rq_rcv_buf.head[0].iov_len;
	if (curlen > copy_len) {	/* write chunk header fixup */
		curlen = copy_len;
		rqst->rq_rcv_buf.head[0].iov_len = curlen;
	}

	dprintk("RPC: %s: srcp 0x%p len %d hdrlen %d\n",
		__func__, srcp, copy_len, curlen);

	/* Shift pointer for first receive segment only - the head is
	 * aliased into the receive buffer rather than copied.
	 */
	rqst->rq_rcv_buf.head[0].iov_base = srcp;
	srcp += curlen;
	copy_len -= curlen;

	olen = copy_len;	/* remember for the "lost bytes" report below */
	i = 0;
	rpcx_to_rdmax(rqst->rq_xprt)->rx_stats.fixup_copy_count += olen;
	/* Locate the first page and the offset into it */
	page_base = rqst->rq_rcv_buf.page_base;
	ppages = rqst->rq_rcv_buf.pages + (page_base >> PAGE_SHIFT);
	page_base &= ~PAGE_MASK;

	if (copy_len && rqst->rq_rcv_buf.page_len) {
		npages = PAGE_ALIGN(page_base +
			rqst->rq_rcv_buf.page_len) >> PAGE_SHIFT;
		for (; i < npages; i++) {
			/* First page may be partial; later pages start at 0 */
			curlen = PAGE_SIZE - page_base;
			if (curlen > copy_len)
				curlen = copy_len;
			dprintk("RPC: %s: page %d"
				" srcp 0x%p len %d curlen %d\n",
				__func__, i, srcp, copy_len, curlen);
			destp = kmap_atomic(ppages[i]);
			memcpy(destp + page_base, srcp, curlen);
			/* keep CPU caches coherent for user mappings */
			flush_dcache_page(ppages[i]);
			kunmap_atomic(destp);
			srcp += curlen;
			copy_len -= curlen;
			if (copy_len == 0)
				break;
			page_base = 0;
		}
	}

	if (copy_len && rqst->rq_rcv_buf.tail[0].iov_len) {
		curlen = copy_len;
		if (curlen > rqst->rq_rcv_buf.tail[0].iov_len)
			curlen = rqst->rq_rcv_buf.tail[0].iov_len;
		/* memmove: source and tail may overlap in the receive buffer */
		if (rqst->rq_rcv_buf.tail[0].iov_base != srcp)
			memmove(rqst->rq_rcv_buf.tail[0].iov_base, srcp, curlen);
		dprintk("RPC: %s: tail srcp 0x%p len %d curlen %d\n",
			__func__, srcp, copy_len, curlen);
		rqst->rq_rcv_buf.tail[0].iov_len = curlen;
		copy_len -= curlen; ++i;
	} else
		rqst->rq_rcv_buf.tail[0].iov_len = 0;

	if (pad) {
		/* implicit padding on terminal chunk */
		unsigned char *p = rqst->rq_rcv_buf.tail[0].iov_base;
		while (pad--)
			p[rqst->rq_rcv_buf.tail[0].iov_len++] = 0;
	}

	/* Anything still left over did not fit and is dropped */
	if (copy_len)
		dprintk("RPC: %s: %d bytes in"
			" %d extra segments (%d lost)\n",
			__func__, olen, i, copy_len);

	/* TBD avoid a warning from call_decode() */
	rqst->rq_private_buf = rqst->rq_rcv_buf;
}
|
|
|
|
|
|
|
|
void
|
2014-05-28 22:32:17 +08:00
|
|
|
rpcrdma_connect_worker(struct work_struct *work)
|
2007-09-11 01:50:42 +08:00
|
|
|
{
|
2014-05-28 22:32:17 +08:00
|
|
|
struct rpcrdma_ep *ep =
|
|
|
|
container_of(work, struct rpcrdma_ep, rep_connect_worker.work);
|
2015-01-22 00:03:11 +08:00
|
|
|
struct rpcrdma_xprt *r_xprt =
|
|
|
|
container_of(ep, struct rpcrdma_xprt, rx_ep);
|
|
|
|
struct rpc_xprt *xprt = &r_xprt->rx_xprt;
|
2007-09-11 01:50:42 +08:00
|
|
|
|
|
|
|
spin_lock_bh(&xprt->transport_lock);
|
2008-10-10 03:00:40 +08:00
|
|
|
if (++xprt->connect_cookie == 0) /* maintain a reserved value */
|
|
|
|
++xprt->connect_cookie;
|
2007-09-11 01:50:42 +08:00
|
|
|
if (ep->rep_connected > 0) {
|
|
|
|
if (!xprt_test_and_set_connected(xprt))
|
|
|
|
xprt_wake_pending_tasks(xprt, 0);
|
|
|
|
} else {
|
|
|
|
if (xprt_test_and_clear_connected(xprt))
|
2008-10-10 03:01:21 +08:00
|
|
|
xprt_wake_pending_tasks(xprt, -ENOTCONN);
|
2007-09-11 01:50:42 +08:00
|
|
|
}
|
|
|
|
spin_unlock_bh(&xprt->transport_lock);
|
|
|
|
}
|
|
|
|
|
2015-10-25 05:28:08 +08:00
|
|
|
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */
static bool
rpcrdma_is_bcall(struct rpcrdma_msg *headerp)
{
	__be32 *words = (__be32 *)headerp;
	int n;

	if (headerp->rm_type != rdma_msg)
		return false;

	/* Backchannel calls never carry read, write, or reply chunks */
	for (n = 0; n < 3; n++)
		if (headerp->rm_body.rm_chunks[n] != xdr_zero)
			return false;

	/* sanity: embedded RPC xid must repeat the RPC/RDMA xid */
	if (words[7] != headerp->rm_xid)
		return false;
	/* call direction must indicate a forward call */
	if (words[8] != cpu_to_be32(RPC_CALL))
		return false;

	return true;
}
#endif	/* CONFIG_SUNRPC_BACKCHANNEL */
|
|
|
|
|
2014-05-28 22:32:17 +08:00
|
|
|
/*
 * This function is called when an async event is posted to
 * the connection which changes the connection state. All it
 * does at this point is mark the connection up/down, the rpc
 * timers do the rest.
 */
void
rpcrdma_conn_func(struct rpcrdma_ep *ep)
{
	/* Defer the actual state handling to process context via
	 * rpcrdma_connect_worker; presumably this may be invoked from
	 * a context where the transport lock cannot be taken directly
	 * - TODO confirm against callers.
	 */
	schedule_delayed_work(&ep->rep_connect_worker, 0);
}
|
|
|
|
|
2015-10-25 05:27:10 +08:00
|
|
|
/* Process received RPC/RDMA messages.
 *
 * Errors must result in the RPC task either being awakened, or
 * allowed to timeout, to discover the errors at that time.
 *
 * On success, the matched rpc_rqst is removed from the transport's
 * pending list, registered memory is invalidated, the congestion
 * window is updated from the server's credit grant, and the waiting
 * RPC task is completed with either the reply length or -EIO.
 * Unmatched or malformed replies are logged and the receive buffer
 * is reposted.
 */
void
rpcrdma_reply_handler(struct rpcrdma_rep *rep)
{
	struct rpcrdma_msg *headerp;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	__be32 *iptr;
	int rdmalen, status;
	unsigned long cwnd;
	u32 credits;

	dprintk("RPC: %s: incoming rep %p\n", __func__, rep);

	/* Weed out transport-level failures and runt messages before
	 * touching any header fields.
	 */
	if (rep->rr_len == RPCRDMA_BAD_LEN)
		goto out_badstatus;
	if (rep->rr_len < RPCRDMA_HDRLEN_MIN)
		goto out_shortreply;

	headerp = rdmab_to_msg(rep->rr_rdmabuf);
	if (headerp->rm_vers != rpcrdma_version)
		goto out_badversion;
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	/* Backchannel calls are diverted before request matching */
	if (rpcrdma_is_bcall(headerp))
		goto out_bcall;
#endif

	/* Match incoming rpcrdma_rep to an rpcrdma_req to
	 * get context for handling any incoming chunks.
	 * NB: transport_lock is held until the match either fails
	 * (out_nomatch/out_duplicate unlock it) or succeeds below.
	 */
	spin_lock_bh(&xprt->transport_lock);
	rqst = xprt_lookup_rqst(xprt, headerp->rm_xid);
	if (!rqst)
		goto out_nomatch;

	req = rpcr_to_rdmar(rqst);
	if (req->rl_reply)
		goto out_duplicate;

	/* Sanity checking has passed. We are now committed
	 * to complete this transaction.
	 */
	list_del_init(&rqst->rq_list);
	spin_unlock_bh(&xprt->transport_lock);
	dprintk("RPC: %s: reply %p completes request %p (xid 0x%08x)\n",
		__func__, rep, req, be32_to_cpu(headerp->rm_xid));

	/* from here on, the reply is no longer an orphan */
	req->rl_reply = rep;
	xprt->reestablish_timeout = 0;

	/* check for expected message types */
	/* The order of some of these tests is important. */
	switch (headerp->rm_type) {
	case rdma_msg:
		/* never expect read chunks */
		/* never expect reply chunks (two ways to check) */
		/* never expect write chunks without having offered RDMA */
		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
		    (headerp->rm_body.rm_chunks[1] == xdr_zero &&
		     headerp->rm_body.rm_chunks[2] != xdr_zero) ||
		    (headerp->rm_body.rm_chunks[1] != xdr_zero &&
		     req->rl_nchunks == 0))
			goto badheader;
		if (headerp->rm_body.rm_chunks[1] != xdr_zero) {
			/* count any expected write chunks in read reply */
			/* start at write chunk array count */
			iptr = &headerp->rm_body.rm_chunks[2];
			rdmalen = rpcrdma_count_chunks(rep,
						req->rl_nchunks, 1, &iptr);
			/* check for validity, and no reply chunk after */
			if (rdmalen < 0 || *iptr++ != xdr_zero)
				goto badheader;
			/* Inline portion = received bytes minus header;
			 * reply length = inline + RDMA'd payload.
			 */
			rep->rr_len -=
			    ((unsigned char *)iptr - (unsigned char *)headerp);
			status = rep->rr_len + rdmalen;
			r_xprt->rx_stats.total_rdma_reply += rdmalen;
			/* special case - last chunk may omit padding */
			if (rdmalen &= 3) {
				rdmalen = 4 - rdmalen;
				status += rdmalen;
			}
		} else {
			/* else ordinary inline */
			rdmalen = 0;
			iptr = (__be32 *)((unsigned char *)headerp +
							RPCRDMA_HDRLEN_MIN);
			rep->rr_len -= RPCRDMA_HDRLEN_MIN;
			status = rep->rr_len;
		}
		/* Fix up the rpc results for upper layer */
		rpcrdma_inline_fixup(rqst, (char *)iptr, rep->rr_len, rdmalen);
		break;

	case rdma_nomsg:
		/* never expect read or write chunks, always reply chunks */
		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
		    headerp->rm_body.rm_chunks[1] != xdr_zero ||
		    headerp->rm_body.rm_chunks[2] != xdr_one ||
		    req->rl_nchunks == 0)
			goto badheader;
		iptr = (__be32 *)((unsigned char *)headerp +
							RPCRDMA_HDRLEN_MIN);
		rdmalen = rpcrdma_count_chunks(rep, req->rl_nchunks, 0, &iptr);
		if (rdmalen < 0)
			goto badheader;
		r_xprt->rx_stats.total_rdma_reply += rdmalen;
		/* Reply chunk buffer already is the reply vector - no fixup. */
		status = rdmalen;
		break;

	/* badheader is deliberately fused to the default arm so both
	 * malformed chunk lists and unknown message types share the
	 * same -EIO completion path.
	 */
badheader:
	default:
		dprintk("%s: invalid rpcrdma reply header (type %d):"
				" chunks[012] == %d %d %d"
				" expected chunks <= %d\n",
				__func__, be32_to_cpu(headerp->rm_type),
				headerp->rm_body.rm_chunks[0],
				headerp->rm_body.rm_chunks[1],
				headerp->rm_body.rm_chunks[2],
				req->rl_nchunks);
		status = -EIO;
		r_xprt->rx_stats.bad_reply_count++;
		break;
	}

	/* Invalidate and flush the data payloads before waking the
	 * waiting application. This guarantees the memory region is
	 * properly fenced from the server before the application
	 * accesses the data. It also ensures proper send flow
	 * control: waking the next RPC waits until this RPC has
	 * relinquished all its Send Queue entries.
	 */
	if (req->rl_nchunks)
		r_xprt->rx_ia.ri_ops->ro_unmap_sync(r_xprt, req);

	/* Clamp the server's credit grant to [1, rb_max_requests];
	 * a zero grant would deadlock the transport.
	 */
	credits = be32_to_cpu(headerp->rm_credit);
	if (credits == 0)
		credits = 1;	/* don't deadlock */
	else if (credits > r_xprt->rx_buf.rb_max_requests)
		credits = r_xprt->rx_buf.rb_max_requests;

	spin_lock_bh(&xprt->transport_lock);
	cwnd = xprt->cwnd;
	xprt->cwnd = credits << RPC_CWNDSHIFT;
	if (xprt->cwnd > cwnd)
		xprt_release_rqst_cong(rqst->rq_task);

	xprt_complete_rqst(rqst->rq_task, status);
	spin_unlock_bh(&xprt->transport_lock);
	dprintk("RPC: %s: xprt_complete_rqst(0x%p, 0x%p, %d)\n",
			__func__, xprt, rqst, status);
	return;

out_badstatus:
	/* Transport-level receive failure: drop the buffer and force
	 * the connection down so it is re-established.
	 */
	rpcrdma_recv_buffer_put(rep);
	if (r_xprt->rx_ep.rep_connected == 1) {
		r_xprt->rx_ep.rep_connected = -EIO;
		rpcrdma_conn_func(&r_xprt->rx_ep);
	}
	return;

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
out_bcall:
	rpcrdma_bc_receive_call(r_xprt, rep);
	return;
#endif

out_shortreply:
	dprintk("RPC: %s: short/invalid reply\n", __func__);
	goto repost;

out_badversion:
	dprintk("RPC: %s: invalid version %d\n",
		__func__, be32_to_cpu(headerp->rm_vers));
	goto repost;

out_nomatch:
	/* reached with transport_lock held from the lookup above */
	spin_unlock_bh(&xprt->transport_lock);
	dprintk("RPC: %s: no match for incoming xid 0x%08x len %d\n",
		__func__, be32_to_cpu(headerp->rm_xid),
		rep->rr_len);
	goto repost;

out_duplicate:
	/* reached with transport_lock held from the lookup above */
	spin_unlock_bh(&xprt->transport_lock);
	dprintk("RPC: %s: "
		"duplicate reply %p to RPC request %p: xid 0x%08x\n",
		__func__, rep, req, be32_to_cpu(headerp->rm_xid));

repost:
	r_xprt->rx_stats.bad_reply_count++;
	if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, &r_xprt->rx_ep, rep))
		rpcrdma_recv_buffer_put(rep);
}
|