// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2016-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

/* Operation
 *
 * The main entry point is svc_rdma_sendto. This is called by the
 * RPC server when an RPC Reply is ready to be transmitted to a client.
 *
 * The passed-in svc_rqst contains a struct xdr_buf which holds an
 * XDR-encoded RPC Reply message. sendto must construct the RPC-over-RDMA
 * transport header, post all Write WRs needed for this Reply, then post
 * a Send WR conveying the transport header and the RPC message itself to
 * the client.
 *
 * svc_rdma_sendto must fully transmit the Reply before returning, as
 * the svc_rqst will be recycled as soon as sendto returns. Remaining
 * resources referred to by the svc_rqst are also recycled at that time.
 * Therefore any resources that must remain longer must be detached
 * from the svc_rqst and released later.
 *
 * Page Management
 *
 * The I/O that performs Reply transmission is asynchronous, and may
 * complete well after sendto returns. Thus pages under I/O must be
 * removed from the svc_rqst before sendto returns.
 *
 * The logic here depends on Send Queue and completion ordering. Since
 * the Send WR is always posted last, it will always complete last. Thus
 * when it completes, it is guaranteed that all previous Write WRs have
 * also completed.
 *
 * Write WRs are constructed and posted. Each Write segment gets its own
 * svc_rdma_rw_ctxt, allowing the Write completion handler to find and
 * DMA-unmap the pages under I/O for that Write segment. The Write
 * completion handler does not release any pages.
 *
 * When the Send WR is constructed, it also gets its own svc_rdma_send_ctxt.
 * The ownership of all of the Reply's pages is transferred into that
 * ctxt, the Send WR is posted, and sendto returns.
 *
 * The svc_rdma_send_ctxt is presented when the Send WR completes. The
* Send completion handler finally releases the Reply's pages.
 *
 * This mechanism also assumes that completions on the transport's Send
 * Completion Queue do not run in parallel. Otherwise a Write completion
 * and Send completion running at the same time could release pages that
 * are still DMA-mapped.
 *
 * Error Handling
 *
* - If the Send WR is posted successfully, it will either complete
 *   successfully, or get flushed. Either way, the Send completion
 *   handler releases the Reply's pages.
 * - If the Send WR cannot be posted, the forward path releases
 *   the Reply's pages.
*
 * This handles the case, without the use of page reference counting,
 * where two different Write segments send portions of the same page.
 */

#include <linux/spinlock.h>
#include <asm/unaligned.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#define RPCDBG_FACILITY		RPCDBG_SVCXPRT

static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc);

static inline struct svc_rdma_send_ctxt *
svc_rdma_next_send_ctxt(struct list_head *list)
{
	return list_first_entry_or_null(list, struct svc_rdma_send_ctxt,
					sc_list);
}

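/* Allocate one send_ctxt. Along with the ctxt itself, a persistent
 * buffer for the RPC-over-RDMA transport header is allocated and
 * DMA-mapped once here; that mapping lasts until the ctxt is torn
 * down in svc_rdma_send_ctxts_destroy().
 */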
static struct svc_rdma_send_ctxt *
svc_rdma_send_ctxt_alloc(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;
	dma_addr_t addr;
	void *buffer;
	size_t size;
	int i;

	size = sizeof(*ctxt);
	size += rdma->sc_max_send_sges * sizeof(struct ib_sge);
	ctxt = kmalloc(size, GFP_KERNEL);
	if (!ctxt)
		goto fail0;
	buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL);
	if (!buffer)
		goto fail1;
	addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
				 rdma->sc_max_req_size, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
		goto fail2;

	ctxt->sc_send_wr.next = NULL;
	ctxt->sc_send_wr.wr_cqe = &ctxt->sc_cqe;
	ctxt->sc_send_wr.sg_list = ctxt->sc_sges;
	ctxt->sc_send_wr.send_flags = IB_SEND_SIGNALED;
	ctxt->sc_cqe.done = svc_rdma_wc_send;
	ctxt->sc_xprt_buf = buffer;
	ctxt->sc_sges[0].addr = addr;

	for (i = 0; i < rdma->sc_max_send_sges; i++)
		ctxt->sc_sges[i].lkey = rdma->sc_pd->local_dma_lkey;
	return ctxt;

fail2:
	kfree(buffer);
fail1:
	kfree(ctxt);
fail0:
	return NULL;
}

/**
 * svc_rdma_send_ctxts_destroy - Release all send_ctxt's for an xprt
 * @rdma: svcxprt_rdma being torn down
 *
 */
void svc_rdma_send_ctxts_destroy(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_send_ctxt(&rdma->sc_send_ctxts))) {
		list_del(&ctxt->sc_list);
		ib_dma_unmap_single(rdma->sc_pd->device,
				    ctxt->sc_sges[0].addr,
				    rdma->sc_max_req_size,
				    DMA_TO_DEVICE);
		kfree(ctxt->sc_xprt_buf);
		kfree(ctxt);
	}
}

/**
 * svc_rdma_send_ctxt_get - Get a free send_ctxt
 * @rdma: controlling svcxprt_rdma
 *
 * Returns a ready-to-use send_ctxt, or NULL if none are
 * available and a fresh one cannot be allocated.
 */
struct svc_rdma_send_ctxt *svc_rdma_send_ctxt_get(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_send_ctxt *ctxt;

	spin_lock(&rdma->sc_send_lock);
	ctxt = svc_rdma_next_send_ctxt(&rdma->sc_send_ctxts);
	if (!ctxt)
		goto out_empty;
	list_del(&ctxt->sc_list);
	spin_unlock(&rdma->sc_send_lock);

out:
	ctxt->sc_send_wr.num_sge = 0;
	ctxt->sc_cur_sge_no = 0;
	ctxt->sc_page_count = 0;
	return ctxt;

out_empty:
	spin_unlock(&rdma->sc_send_lock);
	ctxt = svc_rdma_send_ctxt_alloc(rdma);
	if (!ctxt)
		return NULL;
	goto out;
}

/**
 * svc_rdma_send_ctxt_put - Return send_ctxt to free list
 * @rdma: controlling svcxprt_rdma
 * @ctxt: object to return to the free list
 *
 * Pages left in sc_pages are DMA unmapped and released.
 */
void svc_rdma_send_ctxt_put(struct svcxprt_rdma *rdma,
			    struct svc_rdma_send_ctxt *ctxt)
{
	struct ib_device *device = rdma->sc_cm_id->device;
	unsigned int i;

	/* The first SGE contains the transport header, which
	 * remains mapped until @ctxt is destroyed.
	 */
	for (i = 1; i < ctxt->sc_send_wr.num_sge; i++) {
		ib_dma_unmap_page(device,
				  ctxt->sc_sges[i].addr,
				  ctxt->sc_sges[i].length,
				  DMA_TO_DEVICE);
		trace_svcrdma_dma_unmap_page(rdma,
					     ctxt->sc_sges[i].addr,
					     ctxt->sc_sges[i].length);
	}

	for (i = 0; i < ctxt->sc_page_count; ++i)
		put_page(ctxt->sc_pages[i]);

	spin_lock(&rdma->sc_send_lock);
	list_add(&ctxt->sc_list, &rdma->sc_send_ctxts);
	spin_unlock(&rdma->sc_send_lock);
}

/**
 * svc_rdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq: Completion Queue context
 * @wc: Work Completion object
 *
 * NB: The svc_xprt/svcxprt_rdma is pinned whenever it's possible that
 * the Send completion handler could be running.
 */
static void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
	struct svcxprt_rdma *rdma = cq->cq_context;
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_send_ctxt *ctxt;

	trace_svcrdma_wc_send(wc);

	atomic_inc(&rdma->sc_sq_avail);
	wake_up(&rdma->sc_send_wait);

	ctxt = container_of(cqe, struct svc_rdma_send_ctxt, sc_cqe);
	svc_rdma_send_ctxt_put(rdma, ctxt);

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
		svc_xprt_enqueue(&rdma->sc_xprt);
	}

	svc_xprt_put(&rdma->sc_xprt);
}

/**
 * svc_rdma_send - Post a single Send WR
 * @rdma: transport on which to post the WR
 * @wr: prepared Send WR to post
 *
 * Returns zero if the Send WR was posted successfully. Otherwise, a
 * negative errno is returned.
 */
int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr)
{
	int ret;

	might_sleep();

	/* If the SQ is full, wait until an SQ entry is available */
	while (1) {
		if ((atomic_dec_return(&rdma->sc_sq_avail) < 0)) {
			atomic_inc(&rdma_stat_sq_starve);
			trace_svcrdma_sq_full(rdma);
			atomic_inc(&rdma->sc_sq_avail);
			wait_event(rdma->sc_send_wait,
				   atomic_read(&rdma->sc_sq_avail) > 1);
			if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
				return -ENOTCONN;
			trace_svcrdma_sq_retry(rdma);
			continue;
		}

		svc_xprt_get(&rdma->sc_xprt);
		ret = ib_post_send(rdma->sc_qp, wr, NULL);
		trace_svcrdma_post_send(wr, ret);
		if (ret) {
			set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
			svc_xprt_put(&rdma->sc_xprt);
			wake_up(&rdma->sc_send_wait);
		}
		break;
	}
	return ret;
}

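/* Return the number of zero-pad bytes needed to bring @len up to the
 * next XDR four-byte boundary.
 */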
static u32 xdr_padsize(u32 len)
{
	return (len & 3) ? (4 - (len & 3)) : 0;
}

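/* The RPC-over-RDMA version 1 transport header walked below is laid
 * out as:
 *
 *	xid, vers, credits, proc	(rpcrdma_fixed_maxsz words)
 *	Read list			(always empty in a Reply)
 *	Write list			(zero or more chunks, xdr_zero-terminated)
 *	Reply chunk			(optional, single chunk)
 *
 * Each chunk is a discriminator word, a segment count, and then
 * rpcrdma_segment_maxsz words per segment.
 */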
/* Returns length of transport header, in bytes.
 */
static unsigned int svc_rdma_reply_hdr_len(__be32 *rdma_resp)
{
	unsigned int nsegs;
	__be32 *p;

	p = rdma_resp;

	/* RPC-over-RDMA V1 replies never have a Read list. */
	p += rpcrdma_fixed_maxsz + 1;

	/* Skip Write list. */
	while (*p++ != xdr_zero) {
		nsegs = be32_to_cpup(p++);
		p += nsegs * rpcrdma_segment_maxsz;
	}

	/* Skip Reply chunk. */
	if (*p++ != xdr_zero) {
		nsegs = be32_to_cpup(p++);
		p += nsegs * rpcrdma_segment_maxsz;
	}

	return (unsigned long)p - (unsigned long)rdma_resp;
}

/* One Write chunk is copied from Call transport header to Reply
 * transport header. Each segment's length field is updated to
 * reflect number of bytes consumed in the segment.
 *
 * Returns number of segments in this chunk.
 */
static unsigned int xdr_encode_write_chunk(__be32 *dst, __be32 *src,
					   unsigned int remaining)
{
	unsigned int i, nsegs;
	u32 seg_len;

	/* Write list discriminator */
	*dst++ = *src++;

	/* number of segments in this chunk */
	nsegs = be32_to_cpup(src);
	*dst++ = *src++;

	for (i = nsegs; i; i--) {
		/* segment's RDMA handle */
		*dst++ = *src++;

		/* bytes returned in this segment */
		seg_len = be32_to_cpu(*src);
		if (remaining >= seg_len) {
			/* entire segment was consumed */
			*dst = *src;
			remaining -= seg_len;
		} else {
			/* segment only partly filled */
			*dst = cpu_to_be32(remaining);
			remaining = 0;
		}
		dst++; src++;

		/* segment's RDMA offset */
		*dst++ = *src++;
		*dst++ = *src++;
	}

	return nsegs;
}

/* The client provided a Write list in the Call message. Fill in
 * the segments in the first Write chunk in the Reply's transport
 * header with the number of bytes consumed in each segment.
 * Remaining chunks are returned unused.
 *
 * Assumptions:
 *  - Client has provided only one Write chunk
 */
static void svc_rdma_xdr_encode_write_list(__be32 *rdma_resp, __be32 *wr_ch,
					   unsigned int consumed)
{
	unsigned int nsegs;
	__be32 *p, *q;

	/* RPC-over-RDMA V1 replies never have a Read list. */
	p = rdma_resp + rpcrdma_fixed_maxsz + 1;

	q = wr_ch;
	while (*q != xdr_zero) {
		nsegs = xdr_encode_write_chunk(p, q, consumed);
		q += 2 + nsegs * rpcrdma_segment_maxsz;
		p += 2 + nsegs * rpcrdma_segment_maxsz;
		consumed = 0;
	}

	/* Terminate Write list */
	*p++ = xdr_zero;

	/* Reply chunk discriminator; may be replaced later */
	*p = xdr_zero;
}

/* The client provided a Reply chunk in the Call message. Fill in
 * the segments in the Reply chunk in the Reply message with the
 * number of bytes consumed in each segment.
 *
 * Assumptions:
 * - Reply can always fit in the provided Reply chunk
 */
static void svc_rdma_xdr_encode_reply_chunk(__be32 *rdma_resp, __be32 *rp_ch,
					    unsigned int consumed)
{
	__be32 *p;

	/* Find the Reply chunk in the Reply's xprt header.
	 * RPC-over-RDMA V1 replies never have a Read list.
	 */
	p = rdma_resp + rpcrdma_fixed_maxsz + 1;

	/* Skip past Write list */
	while (*p++ != xdr_zero)
		p += 1 + be32_to_cpup(p) * rpcrdma_segment_maxsz;

	xdr_encode_write_chunk(p, rp_ch, consumed);
}

/* Parse the RPC Call's transport header.
 */
static void svc_rdma_get_write_arrays(__be32 *rdma_argp,
				      __be32 **write, __be32 **reply)
{
	__be32 *p;

	p = rdma_argp + rpcrdma_fixed_maxsz;

	/* Read list */
	while (*p++ != xdr_zero)
		p += 5;

	/* Write list */
	if (*p != xdr_zero) {
		*write = p;
		while (*p++ != xdr_zero)
			p += 1 + be32_to_cpu(*p) * 4;
	} else {
		*write = NULL;
		p++;
	}

	/* Reply chunk */
	if (*p != xdr_zero)
		*reply = p;
	else
		*reply = NULL;
}

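/* DMA-map one page of the Reply and record it in the SGE at
 * @ctxt->sc_cur_sge_no. The mapping is released by
 * svc_rdma_send_ctxt_put() once the Send has completed.
 */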
static int svc_rdma_dma_map_page(struct svcxprt_rdma *rdma,
				 struct svc_rdma_send_ctxt *ctxt,
				 struct page *page,
				 unsigned long offset,
				 unsigned int len)
{
	struct ib_device *dev = rdma->sc_cm_id->device;
	dma_addr_t dma_addr;

	dma_addr = ib_dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
	trace_svcrdma_dma_map_page(rdma, dma_addr, len);
	if (ib_dma_mapping_error(dev, dma_addr))
		goto out_maperr;

	ctxt->sc_sges[ctxt->sc_cur_sge_no].addr = dma_addr;
	ctxt->sc_sges[ctxt->sc_cur_sge_no].length = len;
	ctxt->sc_send_wr.num_sge++;
	return 0;

out_maperr:
	return -EIO;
}

/* ib_dma_map_page() is used here because svc_rdma_send_ctxt_put()
 * handles the DMA-unmap, and it uses ib_dma_unmap_page() exclusively.
 */
static int svc_rdma_dma_map_buf(struct svcxprt_rdma *rdma,
				struct svc_rdma_send_ctxt *ctxt,
				unsigned char *base,
				unsigned int len)
{
	return svc_rdma_dma_map_page(rdma, ctxt, virt_to_page(base),
				     offset_in_page(base), len);
}

/**
 * svc_rdma_sync_reply_hdr - DMA sync the transport header buffer
 * @rdma: controlling transport
 * @ctxt: send_ctxt for the Send WR
 * @len: length of transport header
 *
 */
void svc_rdma_sync_reply_hdr(struct svcxprt_rdma *rdma,
			     struct svc_rdma_send_ctxt *ctxt,
			     unsigned int len)
{
	ctxt->sc_sges[0].length = len;
	ctxt->sc_send_wr.num_sge++;
	ib_dma_sync_single_for_device(rdma->sc_pd->device,
				      ctxt->sc_sges[0].addr, len,
				      DMA_TO_DEVICE);
}

/* If the xdr_buf has more elements than the device can
 * transmit in a single RDMA Send, then the reply will
 * have to be copied into a bounce buffer.
 */
static bool svc_rdma_pull_up_needed(struct svcxprt_rdma *rdma,
				    struct xdr_buf *xdr,
				    __be32 *wr_lst)
{
	int elements;

	/* xdr->head */
	elements = 1;

	/* xdr->pages */
	if (!wr_lst) {
		unsigned int remaining;
		unsigned long pageoff;

		pageoff = xdr->page_base & ~PAGE_MASK;
		remaining = xdr->page_len;
		while (remaining) {
			++elements;
			remaining -= min_t(u32, PAGE_SIZE - pageoff,
					   remaining);
			pageoff = 0;
		}
	}

	/* xdr->tail */
	if (xdr->tail[0].iov_len)
		++elements;

	/* assume 1 SGE is needed for the transport header */
	return elements >= rdma->sc_max_send_sges;
}

/* The device is not capable of sending the reply directly.
 * Assemble the elements of @xdr into the transport header
 * buffer.
 */
static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma,
				      struct svc_rdma_send_ctxt *ctxt,
				      struct xdr_buf *xdr, __be32 *wr_lst)
{
	unsigned char *dst, *tailbase;
	unsigned int taillen;

	dst = ctxt->sc_xprt_buf;
	dst += ctxt->sc_sges[0].length;

	memcpy(dst, xdr->head[0].iov_base, xdr->head[0].iov_len);
	dst += xdr->head[0].iov_len;

	tailbase = xdr->tail[0].iov_base;
	taillen = xdr->tail[0].iov_len;
	if (wr_lst) {
		u32 xdrpad;

		xdrpad = xdr_padsize(xdr->page_len);
		if (taillen && xdrpad) {
			tailbase += xdrpad;
			taillen -= xdrpad;
		}
	} else {
		unsigned int len, remaining;
		unsigned long pageoff;
		struct page **ppages;

		ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
		pageoff = xdr->page_base & ~PAGE_MASK;
		remaining = xdr->page_len;
		while (remaining) {
			len = min_t(u32, PAGE_SIZE - pageoff, remaining);

			memcpy(dst, page_address(*ppages) + pageoff, len);
			remaining -= len;
			dst += len;
			pageoff = 0;
			ppages++;
		}
}

	if (taillen)
		memcpy(dst, tailbase, taillen);

	ctxt->sc_sges[0].length += xdr->len;
	ib_dma_sync_single_for_device(rdma->sc_pd->device,
				      ctxt->sc_sges[0].addr,
				      ctxt->sc_sges[0].length,
				      DMA_TO_DEVICE);

	return 0;
}

/**
 * svc_rdma_map_reply_msg - Map the buffer holding RPC message
 * @rdma: controlling transport
 * @ctxt: send_ctxt for the Send WR
 * @xdr: prepared xdr_buf containing RPC message
 * @wr_lst: pointer to Call header's Write list, or NULL
 *
 * Load the xdr_buf into the ctxt's sge array, and DMA map each
 * element as it is added.
 *
 * Returns zero on success, or a negative errno on failure.
 */
int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
			   struct svc_rdma_send_ctxt *ctxt,
			   struct xdr_buf *xdr, __be32 *wr_lst)
{
	unsigned int len, remaining;
	unsigned long page_off;
	struct page **ppages;
	unsigned char *base;
	u32 xdr_pad;
	int ret;

if (svc_rdma_pull_up_needed(rdma, xdr, wr_lst))
		return svc_rdma_pull_up_reply_msg(rdma, ctxt, xdr, wr_lst);

	++ctxt->sc_cur_sge_no;
	ret = svc_rdma_dma_map_buf(rdma, ctxt,
				   xdr->head[0].iov_base,
				   xdr->head[0].iov_len);
	if (ret < 0)
		return ret;

	/* If a Write chunk is present, the xdr_buf's page list
	 * is not included inline. However the Upper Layer may
	 * have added XDR padding in the tail buffer, and that
	 * should not be included inline.
	 */
	if (wr_lst) {
		base = xdr->tail[0].iov_base;
		len = xdr->tail[0].iov_len;
		xdr_pad = xdr_padsize(xdr->page_len);

		if (len && xdr_pad) {
			base += xdr_pad;
			len -= xdr_pad;
		}

		goto tail;
	}

	ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
	page_off = xdr->page_base & ~PAGE_MASK;
	remaining = xdr->page_len;
	while (remaining) {
		len = min_t(u32, PAGE_SIZE - page_off, remaining);

++ctxt->sc_cur_sge_no;
		ret = svc_rdma_dma_map_page(rdma, ctxt, *ppages++,
					    page_off, len);
		if (ret < 0)
			return ret;

		remaining -= len;
		page_off = 0;
	}

	base = xdr->tail[0].iov_base;
	len = xdr->tail[0].iov_len;
tail:
	if (len) {
++ctxt->sc_cur_sge_no;
		ret = svc_rdma_dma_map_buf(rdma, ctxt, base, len);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/* The svc_rqst and all resources it owns are released as soon as
 * svc_rdma_sendto returns. Transfer pages under I/O to the ctxt
 * so they are released by the Send completion handler.
 */
static void svc_rdma_save_io_pages(struct svc_rqst *rqstp,
				   struct svc_rdma_send_ctxt *ctxt)
{
	int i, pages = rqstp->rq_next_page - rqstp->rq_respages;

	ctxt->sc_page_count += pages;
	for (i = 0; i < pages; i++) {
		ctxt->sc_pages[i] = rqstp->rq_respages[i];
		rqstp->rq_respages[i] = NULL;
	}

	/* Prevent svc_xprt_release from releasing pages in rq_pages */
	rqstp->rq_next_page = rqstp->rq_respages;
}

/* Prepare the portion of the RPC Reply that will be transmitted
 * via RDMA Send. The RPC-over-RDMA transport header is prepared
 * in sc_sges[0], and the RPC xdr_buf is prepared in following sges.
 *
 * Depending on whether a Write list or Reply chunk is present,
 * the server may send all, a portion of, or none of the xdr_buf.
 * In the latter case, only the transport header (sc_sges[0]) is
 * transmitted.
 *
 * RDMA Send is the last step of transmitting an RPC reply. Pages
 * involved in the earlier RDMA Writes are here transferred out
 * of the rqstp and into the sctxt's page array. These pages are
 * DMA unmapped by each Write completion, but the subsequent Send
 * completion finally releases these pages.
 *
 * Assumptions:
 * - The Reply's transport header will never be larger than a page.
 */
static int svc_rdma_send_reply_msg(struct svcxprt_rdma *rdma,
				   struct svc_rdma_send_ctxt *sctxt,
				   struct svc_rdma_recv_ctxt *rctxt,
				   struct svc_rqst *rqstp,
				   __be32 *wr_lst, __be32 *rp_ch)
{
	int ret;

if (!rp_ch) {
		ret = svc_rdma_map_reply_msg(rdma, sctxt,
					     &rqstp->rq_res, wr_lst);
		if (ret < 0)
			return ret;
	}

	svc_rdma_save_io_pages(rqstp, sctxt);

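	/* If the client provided an rkey for Remote Invalidation,
	 * the Send WR invalidates that rkey on the client as part
	 * of completing this Reply.
	 */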
	if (rctxt->rc_inv_rkey) {
		sctxt->sc_send_wr.opcode = IB_WR_SEND_WITH_INV;
		sctxt->sc_send_wr.ex.invalidate_rkey = rctxt->rc_inv_rkey;
	} else {
		sctxt->sc_send_wr.opcode = IB_WR_SEND;
	}
	dprintk("svcrdma: posting Send WR with %u sge(s)\n",
		sctxt->sc_send_wr.num_sge);
	return svc_rdma_send(rdma, &sctxt->sc_send_wr);
}

/* Given the client-provided Write and Reply chunks, the server was not
 * able to form a complete reply. Return an RDMA_ERROR message so the
 * client can retire this RPC transaction. As above, the Send completion
 * routine releases payload pages that were part of a previous RDMA Write.
 *
 * Remote Invalidation is skipped for simplicity.
 */
static int svc_rdma_send_error_msg(struct svcxprt_rdma *rdma,
				   struct svc_rdma_send_ctxt *ctxt,
				   struct svc_rqst *rqstp)
{
	__be32 *p;

	p = ctxt->sc_xprt_buf;
	trace_svcrdma_err_chunk(*p);
	p += 3;
	*p++ = rdma_error;
	*p = err_chunk;
	svc_rdma_sync_reply_hdr(rdma, ctxt, RPCRDMA_HDRLEN_ERR);

	svc_rdma_save_io_pages(rqstp, ctxt);

	ctxt->sc_send_wr.opcode = IB_WR_SEND;
	return svc_rdma_send(rdma, &ctxt->sc_send_wr);
}

/**
 * svc_rdma_sendto - Transmit an RPC reply
 * @rqstp: processed RPC request, reply XDR already in ::rq_res
 *
 * Any resources still associated with @rqstp are released upon return.
 * If no reply message was possible, the connection is closed.
 *
 * Returns:
 *	%0 if an RPC reply has been successfully posted,
 *	%-ENOMEM if a resource shortage occurred (connection is lost),
 *	%-ENOTCONN if posting failed (connection is lost).
 */
int svc_rdma_sendto(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;
	__be32 *p, *rdma_argp, *rdma_resp, *wr_lst, *rp_ch;
	struct xdr_buf *xdr = &rqstp->rq_res;
	struct svc_rdma_send_ctxt *sctxt;
	int ret;

	rdma_argp = rctxt->rc_recv_buf;
	svc_rdma_get_write_arrays(rdma_argp, &wr_lst, &rp_ch);

	/* Create the RDMA response header. xprt->xpt_mutex,
	 * acquired in svc_send(), serializes RPC replies. The
	 * code path below that inserts the credit grant value
	 * into each transport header runs only inside this
	 * critical section.
	 */
	ret = -ENOMEM;
	sctxt = svc_rdma_send_ctxt_get(rdma);
	if (!sctxt)
		goto err0;
	rdma_resp = sctxt->sc_xprt_buf;

	p = rdma_resp;
	*p++ = *rdma_argp;
	*p++ = *(rdma_argp + 1);
	*p++ = rdma->sc_fc_credits;
	*p++ = rp_ch ? rdma_nomsg : rdma_msg;

	/* Start with empty chunks */
	*p++ = xdr_zero;
	*p++ = xdr_zero;
	*p = xdr_zero;

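	/* When the client has provided a Write chunk, the bulk payload
	 * is pushed to the client with RDMA Write, and the number of
	 * bytes consumed is then encoded into the Reply's Write list.
	 */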
if (wr_lst) {
		/* XXX: Presume the client sent only one Write chunk */
		unsigned long offset;
		unsigned int length;

		if (rctxt->rc_read_payload_length) {
			offset = rctxt->rc_read_payload_offset;
			length = rctxt->rc_read_payload_length;
		} else {
			offset = xdr->head[0].iov_len;
			length = xdr->page_len;
		}
		ret = svc_rdma_send_write_chunk(rdma, wr_lst, xdr, offset,
						length);
		if (ret < 0)
			goto err2;
		svc_rdma_xdr_encode_write_list(rdma_resp, wr_lst, ret);
	}
	if (rp_ch) {
		ret = svc_rdma_send_reply_chunk(rdma, rp_ch, wr_lst, xdr);
		if (ret < 0)
			goto err2;
		svc_rdma_xdr_encode_reply_chunk(rdma_resp, rp_ch, ret);
	}

	svc_rdma_sync_reply_hdr(rdma, sctxt, svc_rdma_reply_hdr_len(rdma_resp));
	ret = svc_rdma_send_reply_msg(rdma, sctxt, rctxt, rqstp,
				      wr_lst, rp_ch);
	if (ret < 0)
		goto err1;
	ret = 0;

out:
	rqstp->rq_xprt_ctxt = NULL;
	svc_rdma_recv_ctxt_put(rdma, rctxt);
	return ret;

err2:
	if (ret != -E2BIG && ret != -EINVAL)
		goto err1;

	ret = svc_rdma_send_error_msg(rdma, sctxt, rqstp);
	if (ret < 0)
		goto err1;
	ret = 0;
	goto out;

err1:
	svc_rdma_send_ctxt_put(rdma, sctxt);
err0:
	trace_svcrdma_send_failed(rqstp, ret);
	set_bit(XPT_CLOSE, &xprt->xpt_flags);
	ret = -ENOTCONN;
	goto out;
}

/**
 * svc_rdma_read_payload - special processing for a READ payload
 * @rqstp: svc_rqst to operate on
 * @offset: payload's byte offset in @xdr
 * @length: size of payload, in bytes
 *
 * Returns zero on success.
 *
 * For the moment, just record the xdr_buf location of the READ
 * payload. svc_rdma_sendto will use that location later when
 * we actually send the payload.
 */
int svc_rdma_read_payload(struct svc_rqst *rqstp, unsigned int offset,
			  unsigned int length)
{
	struct svc_rdma_recv_ctxt *rctxt = rqstp->rq_xprt_ctxt;

	/* XXX: Just one READ payload slot for now, since our
	 * transport implementation currently supports only one
	 * Write chunk.
	 */
	rctxt->rc_read_payload_offset = offset;
	rctxt->rc_read_payload_length = length;

	return 0;
}