mirror of https://gitee.com/openkylin/linux.git
Merge branch 'for-4.3/sg' of git://git.kernel.dk/linux-block
Pull SG updates from Jens Axboe: "This contains a set of scatter-gather related changes/fixes for 4.3: - Add support for limited chaining of sg tables even for architectures that do not set ARCH_HAS_SG_CHAIN. From Christoph. - Add sg chain support to target_rd. From Christoph. - Fixup open coded sg->page_link in crypto/omap-sham. From Christoph. - Fixup open coded crypto ->page_link manipulation. From Dan. - Also from Dan, automated fixup of manual sg_unmark_end() manipulations. - Also from Dan, automated fixup of open coded sg_phys() implementations. - From Robert Jarzmik, addition of an sg table splitting helper that drivers can use" * 'for-4.3/sg' of git://git.kernel.dk/linux-block: lib: scatterlist: add sg splitting function scatterlist: use sg_phys() crypto/omap-sham: remove an open coded access to ->page_link scatterlist: remove open coded sg_unmark_end instances crypto: replace scatterwalk_sg_chain with sg_chain target/rd: always chain S/G list scatterlist: allow limited chaining without ARCH_HAS_SG_CHAIN
This commit is contained in:
commit
d975f309a8
|
@ -1520,7 +1520,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
|
|||
return -ENOMEM;
|
||||
|
||||
for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
|
||||
phys_addr_t phys = page_to_phys(sg_page(s));
|
||||
phys_addr_t phys = sg_phys(s) & PAGE_MASK;
|
||||
unsigned int len = PAGE_ALIGN(s->offset + s->length);
|
||||
|
||||
if (!is_coherent &&
|
||||
|
|
|
@ -61,8 +61,7 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
|
|||
/* FIXME this part of code is untested */
|
||||
for_each_sg(sgl, sg, nents, i) {
|
||||
sg->dma_address = sg_phys(sg);
|
||||
__dma_sync(page_to_phys(sg_page(sg)) + sg->offset,
|
||||
sg->length, direction);
|
||||
__dma_sync(sg_phys(sg), sg->length, direction);
|
||||
}
|
||||
|
||||
return nents;
|
||||
|
|
|
@ -393,7 +393,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
|
|||
if (rq->cmd_flags & REQ_WRITE)
|
||||
memset(q->dma_drain_buffer, 0, q->dma_drain_size);
|
||||
|
||||
sg->page_link &= ~0x02;
|
||||
sg_unmark_end(sg);
|
||||
sg = sg_next(sg);
|
||||
sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
|
||||
q->dma_drain_size,
|
||||
|
|
|
@ -145,7 +145,7 @@ static int skcipher_alloc_sgl(struct sock *sk)
|
|||
sgl->cur = 0;
|
||||
|
||||
if (sg)
|
||||
scatterwalk_sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
|
||||
sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
|
||||
|
||||
list_add_tail(&sgl->list, &ctx->tsgl);
|
||||
}
|
||||
|
|
|
@ -206,14 +206,14 @@ static void crypto_gcm_init_common(struct aead_request *req)
|
|||
sg_set_buf(pctx->src, pctx->auth_tag, sizeof(pctx->auth_tag));
|
||||
sg = scatterwalk_ffwd(pctx->src + 1, req->src, req->assoclen);
|
||||
if (sg != pctx->src + 1)
|
||||
scatterwalk_sg_chain(pctx->src, 2, sg);
|
||||
sg_chain(pctx->src, 2, sg);
|
||||
|
||||
if (req->src != req->dst) {
|
||||
sg_init_table(pctx->dst, 3);
|
||||
sg_set_buf(pctx->dst, pctx->auth_tag, sizeof(pctx->auth_tag));
|
||||
sg = scatterwalk_ffwd(pctx->dst + 1, req->dst, req->assoclen);
|
||||
if (sg != pctx->dst + 1)
|
||||
scatterwalk_sg_chain(pctx->dst, 2, sg);
|
||||
sg_chain(pctx->dst, 2, sg);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -370,8 +370,7 @@ static int bfin_crypto_crc_handle_queue(struct bfin_crypto_crc *crc,
|
|||
sg_init_table(ctx->bufsl, nsg);
|
||||
sg_set_buf(ctx->bufsl, ctx->buflast, ctx->buflast_len);
|
||||
if (nsg > 1)
|
||||
scatterwalk_sg_chain(ctx->bufsl, nsg,
|
||||
req->src);
|
||||
sg_chain(ctx->bufsl, nsg, req->src);
|
||||
ctx->sg = ctx->bufsl;
|
||||
} else
|
||||
ctx->sg = req->src;
|
||||
|
|
|
@ -588,7 +588,7 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr,
|
|||
* the dmaengine may try to DMA the incorrect amount of data.
|
||||
*/
|
||||
sg_init_table(&ctx->sgl, 1);
|
||||
ctx->sgl.page_link = ctx->sg->page_link;
|
||||
sg_assign_page(&ctx->sgl, sg_page(ctx->sg));
|
||||
ctx->sgl.offset = ctx->sg->offset;
|
||||
sg_dma_len(&ctx->sgl) = len32;
|
||||
sg_dma_address(&ctx->sgl) = sg_dma_address(ctx->sg);
|
||||
|
|
|
@ -296,7 +296,7 @@ static int qce_ahash_update(struct ahash_request *req)
|
|||
if (rctx->buflen) {
|
||||
sg_init_table(rctx->sg, 2);
|
||||
sg_set_buf(rctx->sg, rctx->tmpbuf, rctx->buflen);
|
||||
scatterwalk_sg_chain(rctx->sg, 2, req->src);
|
||||
sg_chain(rctx->sg, 2, req->src);
|
||||
req->src = rctx->sg;
|
||||
}
|
||||
|
||||
|
|
|
@ -999,7 +999,7 @@ static int sahara_sha_prepare_request(struct ahash_request *req)
|
|||
sg_init_table(rctx->in_sg_chain, 2);
|
||||
sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);
|
||||
|
||||
scatterwalk_sg_chain(rctx->in_sg_chain, 2, req->src);
|
||||
sg_chain(rctx->in_sg_chain, 2, req->src);
|
||||
|
||||
rctx->total = req->nbytes + rctx->buf_cnt;
|
||||
rctx->in_sg = rctx->in_sg_chain;
|
||||
|
|
|
@ -1929,7 +1929,7 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
|
|||
sg_init_table(req_ctx->bufsl, nsg);
|
||||
sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
|
||||
if (nsg > 1)
|
||||
scatterwalk_sg_chain(req_ctx->bufsl, 2, areq->src);
|
||||
sg_chain(req_ctx->bufsl, 2, areq->src);
|
||||
req_ctx->psrc = req_ctx->bufsl;
|
||||
} else
|
||||
req_ctx->psrc = areq->src;
|
||||
|
|
|
@ -2103,7 +2103,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
|
|||
sg_res = aligned_nrpages(sg->offset, sg->length);
|
||||
sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
|
||||
sg->dma_length = sg->length;
|
||||
pteval = page_to_phys(sg_page(sg)) | prot;
|
||||
pteval = (sg_phys(sg) & PAGE_MASK) | prot;
|
||||
phys_pfn = pteval >> VTD_PAGE_SHIFT;
|
||||
}
|
||||
|
||||
|
@ -3631,7 +3631,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
|
|||
|
||||
for_each_sg(sglist, sg, nelems, i) {
|
||||
BUG_ON(!sg_page(sg));
|
||||
sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
|
||||
sg->dma_address = sg_phys(sg);
|
||||
sg->dma_length = sg->length;
|
||||
}
|
||||
return nelems;
|
||||
|
|
|
@ -1408,7 +1408,7 @@ size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
|
|||
min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
|
||||
|
||||
for_each_sg(sg, s, nents, i) {
|
||||
phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
|
||||
phys_addr_t phys = sg_phys(s);
|
||||
|
||||
/*
|
||||
* We are mapping on IOMMU page boundaries, so offset within
|
||||
|
|
|
@ -467,7 +467,7 @@ static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
|
|||
sg_set_buf(__sg, buf + offset, len);
|
||||
offset += len;
|
||||
remain -= len;
|
||||
(__sg++)->page_link &= ~0x02;
|
||||
sg_unmark_end(__sg++);
|
||||
sg_len++;
|
||||
} while (remain);
|
||||
}
|
||||
|
@ -475,7 +475,7 @@ static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
|
|||
list_for_each_entry(req, &packed->list, queuelist) {
|
||||
sg_len += blk_rq_map_sg(mq->queue, req, __sg);
|
||||
__sg = sg + (sg_len - 1);
|
||||
(__sg++)->page_link &= ~0x02;
|
||||
sg_unmark_end(__sg++);
|
||||
}
|
||||
sg_mark_end(sg + (sg_len - 1));
|
||||
return sg_len;
|
||||
|
|
|
@ -81,7 +81,7 @@ static int ion_chunk_heap_allocate(struct ion_heap *heap,
|
|||
err:
|
||||
sg = table->sgl;
|
||||
for (i -= 1; i >= 0; i--) {
|
||||
gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
|
||||
gen_pool_free(chunk_heap->pool, sg_phys(sg) & PAGE_MASK,
|
||||
sg->length);
|
||||
sg = sg_next(sg);
|
||||
}
|
||||
|
@ -109,7 +109,7 @@ static void ion_chunk_heap_free(struct ion_buffer *buffer)
|
|||
DMA_BIDIRECTIONAL);
|
||||
|
||||
for_each_sg(table->sgl, sg, table->nents, i) {
|
||||
gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
|
||||
gen_pool_free(chunk_heap->pool, sg_phys(sg) & PAGE_MASK,
|
||||
sg->length);
|
||||
}
|
||||
chunk_heap->allocated -= allocated_size;
|
||||
|
|
|
@ -138,16 +138,12 @@ static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *
|
|||
sg_per_table = (total_sg_needed > max_sg_per_table) ?
|
||||
max_sg_per_table : total_sg_needed;
|
||||
|
||||
#ifdef CONFIG_ARCH_HAS_SG_CHAIN
|
||||
|
||||
/*
|
||||
* Reserve extra element for chain entry
|
||||
*/
|
||||
if (sg_per_table < total_sg_needed)
|
||||
chain_entry = 1;
|
||||
|
||||
#endif /* CONFIG_ARCH_HAS_SG_CHAIN */
|
||||
|
||||
sg = kcalloc(sg_per_table + chain_entry, sizeof(*sg),
|
||||
GFP_KERNEL);
|
||||
if (!sg) {
|
||||
|
@ -158,15 +154,11 @@ static int rd_allocate_sgl_table(struct rd_dev *rd_dev, struct rd_dev_sg_table *
|
|||
|
||||
sg_init_table(sg, sg_per_table + chain_entry);
|
||||
|
||||
#ifdef CONFIG_ARCH_HAS_SG_CHAIN
|
||||
|
||||
if (i > 0) {
|
||||
sg_chain(sg_table[i - 1].sg_table,
|
||||
max_sg_per_table + 1, sg);
|
||||
}
|
||||
|
||||
#endif /* CONFIG_ARCH_HAS_SG_CHAIN */
|
||||
|
||||
sg_table[i].sg_table = sg;
|
||||
sg_table[i].rd_sg_count = sg_per_table;
|
||||
sg_table[i].page_start_offset = page_offset;
|
||||
|
@ -430,42 +422,6 @@ static sense_reason_t rd_do_prot_rw(struct se_cmd *cmd, bool is_read)
|
|||
prot_sg = &prot_table->sg_table[prot_page -
|
||||
prot_table->page_start_offset];
|
||||
|
||||
#ifndef CONFIG_ARCH_HAS_SG_CHAIN
|
||||
|
||||
prot_npages = DIV_ROUND_UP(prot_offset + sectors * se_dev->prot_length,
|
||||
PAGE_SIZE);
|
||||
|
||||
/*
|
||||
* Allocate temporaly contiguous scatterlist entries if prot pages
|
||||
* straddles multiple scatterlist tables.
|
||||
*/
|
||||
if (prot_table->page_end_offset < prot_page + prot_npages - 1) {
|
||||
int i;
|
||||
|
||||
prot_sg = kcalloc(prot_npages, sizeof(*prot_sg), GFP_KERNEL);
|
||||
if (!prot_sg)
|
||||
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
|
||||
|
||||
need_to_release = true;
|
||||
sg_init_table(prot_sg, prot_npages);
|
||||
|
||||
for (i = 0; i < prot_npages; i++) {
|
||||
if (prot_page + i > prot_table->page_end_offset) {
|
||||
prot_table = rd_get_prot_table(dev,
|
||||
prot_page + i);
|
||||
if (!prot_table) {
|
||||
kfree(prot_sg);
|
||||
return rc;
|
||||
}
|
||||
sg_unmark_end(&prot_sg[i - 1]);
|
||||
}
|
||||
prot_sg[i] = prot_table->sg_table[prot_page + i -
|
||||
prot_table->page_start_offset];
|
||||
}
|
||||
}
|
||||
|
||||
#endif /* !CONFIG_ARCH_HAS_SG_CHAIN */
|
||||
|
||||
if (is_read)
|
||||
rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0,
|
||||
prot_sg, prot_offset);
|
||||
|
|
|
@ -25,14 +25,6 @@
|
|||
#include <linux/scatterlist.h>
|
||||
#include <linux/sched.h>
|
||||
|
||||
/*
 * Chain scatterlist @sg1 to @sg2 by converting the last entry of @sg1 into
 * a chain link pointing at @sg2.  This is an open-coded duplicate of the
 * generic sg_chain() (this commit replaces its callers with sg_chain()).
 */
static inline void scatterwalk_sg_chain(struct scatterlist *sg1, int num,
					struct scatterlist *sg2)
{
	/* Store the pointer to the next list in the page slot of the last entry. */
	sg_set_page(&sg1[num - 1], (void *)sg2, 0, 0);
	sg1[num - 1].page_link &= ~0x02;	/* clear the end-of-list marker bit */
	sg1[num - 1].page_link |= 0x01;		/* set the chain-entry marker bit */
}
|
||||
|
||||
static inline void scatterwalk_crypto_chain(struct scatterlist *head,
|
||||
struct scatterlist *sg,
|
||||
int chain, int num)
|
||||
|
@ -43,7 +35,7 @@ static inline void scatterwalk_crypto_chain(struct scatterlist *head,
|
|||
}
|
||||
|
||||
if (sg)
|
||||
scatterwalk_sg_chain(head, num, sg);
|
||||
sg_chain(head, num, sg);
|
||||
else
|
||||
sg_mark_end(head);
|
||||
}
|
||||
|
|
|
@ -161,10 +161,6 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
|
|||
static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
|
||||
struct scatterlist *sgl)
|
||||
{
|
||||
#ifndef CONFIG_ARCH_HAS_SG_CHAIN
|
||||
BUG();
|
||||
#endif
|
||||
|
||||
/*
|
||||
* offset and length are unused for chain entry. Clear them.
|
||||
*/
|
||||
|
@ -251,6 +247,11 @@ struct scatterlist *sg_next(struct scatterlist *);
|
|||
struct scatterlist *sg_last(struct scatterlist *s, unsigned int);
|
||||
void sg_init_table(struct scatterlist *, unsigned int);
|
||||
void sg_init_one(struct scatterlist *, const void *, unsigned int);
|
||||
int sg_split(struct scatterlist *in, const int in_mapped_nents,
|
||||
const off_t skip, const int nb_splits,
|
||||
const size_t *split_sizes,
|
||||
struct scatterlist **out, int *out_mapped_nents,
|
||||
gfp_t gfp_mask);
|
||||
|
||||
typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t);
|
||||
typedef void (sg_free_fn)(struct scatterlist *, unsigned int);
|
||||
|
|
|
@ -521,6 +521,13 @@ config UCS2_STRING
|
|||
|
||||
source "lib/fonts/Kconfig"
|
||||
|
||||
config SG_SPLIT
|
||||
def_bool n
|
||||
help
|
||||
Provides a helper to split scatterlists into chunks, each chunk being a
|
||||
scatterlist. This should be selected by a driver or an API which
|
||||
wishes to split a scatterlist amongst multiple DMA channels.
|
||||
|
||||
#
|
||||
# sg chaining option
|
||||
#
|
||||
|
|
|
@ -160,6 +160,7 @@ obj-$(CONFIG_GENERIC_STRNLEN_USER) += strnlen_user.o
|
|||
|
||||
obj-$(CONFIG_GENERIC_NET_UTILS) += net_utils.o
|
||||
|
||||
obj-$(CONFIG_SG_SPLIT) += sg_split.o
|
||||
obj-$(CONFIG_STMP_DEVICE) += stmp_device.o
|
||||
|
||||
libfdt_files = fdt.o fdt_ro.o fdt_wip.o fdt_rw.o fdt_sw.o fdt_strerror.o \
|
||||
|
|
|
@ -105,16 +105,12 @@ EXPORT_SYMBOL(sg_nents_for_len);
|
|||
**/
|
||||
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
|
||||
{
|
||||
#ifndef CONFIG_ARCH_HAS_SG_CHAIN
|
||||
struct scatterlist *ret = &sgl[nents - 1];
|
||||
#else
|
||||
struct scatterlist *sg, *ret = NULL;
|
||||
unsigned int i;
|
||||
|
||||
for_each_sg(sgl, sg, nents, i)
|
||||
ret = sg;
|
||||
|
||||
#endif
|
||||
#ifdef CONFIG_DEBUG_SG
|
||||
BUG_ON(sgl[0].sg_magic != SG_MAGIC);
|
||||
BUG_ON(!sg_is_last(ret));
|
||||
|
|
|
@ -0,0 +1,202 @@
|
|||
/*
|
||||
* Copyright (C) 2015 Robert Jarzmik <robert.jarzmik@free.fr>
|
||||
*
|
||||
* Scatterlist splitting helpers.
|
||||
*
|
||||
* This source code is licensed under the GNU General Public License,
|
||||
* Version 2. See the file COPYING for more details.
|
||||
*/
|
||||
|
||||
#include <linux/scatterlist.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
/* Bookkeeping for one output chunk of an sg_split() operation. */
struct sg_splitter {
	struct scatterlist *in_sg0;	/* first input entry used by this chunk */
	int nents;			/* number of input entries the chunk spans */
	off_t skip_sg0;			/* byte offset into in_sg0 where the chunk starts */
	unsigned int length_last_sg;	/* bytes consumed from the chunk's last entry */

	struct scatterlist *out_sg;	/* allocated output sg array for this chunk */
};
|
||||
|
||||
/*
 * Pre-compute the split geometry: for each of the @nb_splits output chunks,
 * record the first input entry it starts in (in_sg0), the byte offset into
 * that entry (skip_sg0), how many input entries it spans (nents), and how
 * many bytes it consumes from its last entry (length_last_sg).
 *
 * @mapped chooses between DMA lengths (sg_dma_len()) and CPU lengths
 * (sg->length) -- sg_split() runs this twice, once for each view.
 *
 * Returns 0 on success, -EINVAL when the input list cannot cover @skip plus
 * the sum of the requested @sizes.
 */
static int sg_calculate_split(struct scatterlist *in, int nents, int nb_splits,
			      off_t skip, const size_t *sizes,
			      struct sg_splitter *splitters, bool mapped)
{
	int i;
	unsigned int sglen;
	size_t size = sizes[0], len;
	struct sg_splitter *curr = splitters;
	struct scatterlist *sg;

	/* Reset placement data; out_sg pointers are deliberately preserved. */
	for (i = 0; i < nb_splits; i++) {
		splitters[i].in_sg0 = NULL;
		splitters[i].nents = 0;
	}

	for_each_sg(in, sg, nents, i) {
		sglen = mapped ? sg_dma_len(sg) : sg->length;
		if (skip > sglen) {
			/* entry lies entirely inside the skipped prefix */
			skip -= sglen;
			continue;
		}

		len = min_t(size_t, size, sglen - skip);
		if (!curr->in_sg0) {
			curr->in_sg0 = sg;
			curr->skip_sg0 = skip;
		}
		size -= len;
		curr->nents++;
		curr->length_last_sg = len;

		/*
		 * Several consecutive splits may begin and end inside this
		 * same input entry; advance through them without moving on
		 * to the next sg.
		 */
		while (!size && (skip + len < sglen) && (--nb_splits > 0)) {
			curr++;
			size = *(++sizes);
			skip += len;
			len = min_t(size_t, size, sglen - skip);

			curr->in_sg0 = sg;
			curr->skip_sg0 = skip;
			curr->nents = 1;
			curr->length_last_sg = len;
			size -= len;
		}
		skip = 0;	/* skip only applies before the first used entry */

		if (!size && --nb_splits > 0) {
			/* current split ends exactly on this entry's boundary */
			curr++;
			size = *(++sizes);
		}

		if (!nb_splits)
			break;
	}

	/* Error if the input ran out before all requested sizes were covered. */
	return (size || !splitters[0].in_sg0) ? -EINVAL : 0;
}
|
||||
|
||||
/*
 * Fill the output scatterlists' CPU view (page/offset/length) from the
 * geometry computed by sg_calculate_split().  Entries are copied wholesale
 * from the input list; the first entry of each chunk is advanced by
 * skip_sg0 and the last entry is trimmed to length_last_sg.  DMA fields are
 * zeroed here and filled in later by sg_split_mapped() for mapped lists.
 */
static void sg_split_phys(struct sg_splitter *splitters, const int nb_splits)
{
	int i, j;
	struct scatterlist *in_sg, *out_sg;
	struct sg_splitter *split;

	for (i = 0, split = splitters; i < nb_splits; i++, split++) {
		in_sg = split->in_sg0;
		out_sg = split->out_sg;
		for (j = 0; j < split->nents; j++, out_sg++) {
			*out_sg = *in_sg;
			if (!j) {
				/* first entry of the chunk: apply the byte offset */
				out_sg->offset += split->skip_sg0;
				out_sg->length -= split->skip_sg0;
			} else {
				out_sg->offset = 0;
			}
			sg_dma_address(out_sg) = 0;
			sg_dma_len(out_sg) = 0;
			in_sg = sg_next(in_sg);
		}
		/* trim the final entry and terminate the output list */
		out_sg[-1].length = split->length_last_sg;
		sg_mark_end(out_sg - 1);
	}
}
|
||||
|
||||
/*
 * Fill the output scatterlists' DMA view (dma_address/dma_len), using the
 * boundaries recomputed against sg_dma_len() by sg_calculate_split().
 * Mirrors sg_split_phys(): the first entry of each chunk is advanced by
 * skip_sg0 and the last one is trimmed to length_last_sg.
 */
static void sg_split_mapped(struct sg_splitter *splitters, const int nb_splits)
{
	int i, j;
	struct scatterlist *in_sg, *out_sg;
	struct sg_splitter *split;

	for (i = 0, split = splitters; i < nb_splits; i++, split++) {
		in_sg = split->in_sg0;
		out_sg = split->out_sg;
		for (j = 0; j < split->nents; j++, out_sg++) {
			sg_dma_address(out_sg) = sg_dma_address(in_sg);
			sg_dma_len(out_sg) = sg_dma_len(in_sg);
			if (!j) {
				/* first entry of the chunk: apply the byte offset */
				sg_dma_address(out_sg) += split->skip_sg0;
				sg_dma_len(out_sg) -= split->skip_sg0;
			}
			in_sg = sg_next(in_sg);
		}
		/* back up to the last written entry and trim it */
		sg_dma_len(--out_sg) = split->length_last_sg;
	}
}
|
||||
|
||||
/**
|
||||
* sg_split - split a scatterlist into several scatterlists
|
||||
* @in: the input sg list
|
||||
* @in_mapped_nents: the result of a dma_map_sg(in, ...), or 0 if not mapped.
|
||||
* @skip: the number of bytes to skip in the input sg list
|
||||
* @nb_splits: the number of desired sg outputs
|
||||
* @split_sizes: the respective size of each output sg list in bytes
|
||||
* @out: an array where to store the allocated output sg lists
|
||||
* @out_mapped_nents: the resulting sg lists mapped number of sg entries. Might
|
||||
* be NULL if sglist not already mapped (in_mapped_nents = 0)
|
||||
* @gfp_mask: the allocation flag
|
||||
*
|
||||
* This function splits the input sg list into nb_splits sg lists, which are
|
||||
* allocated and stored into out.
|
||||
* The @in is split into :
|
||||
* - @out[0], which covers bytes [@skip .. @skip + @split_sizes[0] - 1] of @in
|
||||
* - @out[1], which covers bytes [@skip + split_sizes[0] ..
|
||||
* @skip + @split_sizes[0] + @split_sizes[1] -1]
|
||||
* etc ...
|
||||
* It will be the caller's duty to kfree() out array members.
|
||||
*
|
||||
* Returns 0 upon success, or error code
|
||||
*/
|
||||
int sg_split(struct scatterlist *in, const int in_mapped_nents,
|
||||
const off_t skip, const int nb_splits,
|
||||
const size_t *split_sizes,
|
||||
struct scatterlist **out, int *out_mapped_nents,
|
||||
gfp_t gfp_mask)
|
||||
{
|
||||
int i, ret;
|
||||
struct sg_splitter *splitters;
|
||||
|
||||
splitters = kcalloc(nb_splits, sizeof(*splitters), gfp_mask);
|
||||
if (!splitters)
|
||||
return -ENOMEM;
|
||||
|
||||
ret = sg_calculate_split(in, sg_nents(in), nb_splits, skip, split_sizes,
|
||||
splitters, false);
|
||||
if (ret < 0)
|
||||
goto err;
|
||||
|
||||
ret = -ENOMEM;
|
||||
for (i = 0; i < nb_splits; i++) {
|
||||
splitters[i].out_sg = kmalloc_array(splitters[i].nents,
|
||||
sizeof(struct scatterlist),
|
||||
gfp_mask);
|
||||
if (!splitters[i].out_sg)
|
||||
goto err;
|
||||
}
|
||||
|
||||
/*
|
||||
* The order of these 3 calls is important and should be kept.
|
||||
*/
|
||||
sg_split_phys(splitters, nb_splits);
|
||||
ret = sg_calculate_split(in, in_mapped_nents, nb_splits, skip,
|
||||
split_sizes, splitters, true);
|
||||
if (ret < 0)
|
||||
goto err;
|
||||
sg_split_mapped(splitters, nb_splits);
|
||||
|
||||
for (i = 0; i < nb_splits; i++) {
|
||||
out[i] = splitters[i].out_sg;
|
||||
if (out_mapped_nents)
|
||||
out_mapped_nents[i] = splitters[i].nents;
|
||||
}
|
||||
|
||||
kfree(splitters);
|
||||
return 0;
|
||||
|
||||
err:
|
||||
for (i = 0; i < nb_splits; i++)
|
||||
kfree(splitters[i].out_sg);
|
||||
kfree(splitters);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(sg_split);
|
Loading…
Reference in New Issue