// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2019 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#include "compress.h"
#include <linux/module.h>
#include <linux/lz4.h>
#ifndef LZ4_DISTANCE_MAX        /* history window size */
#define LZ4_DISTANCE_MAX        65535   /* set to maximum value by default */
#endif
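
/*
 * LZ4 matches can reference up to LZ4_DISTANCE_MAX (65535) bytes of earlier
 * output.  Since that window is not page-aligned, it may straddle one extra
 * page, hence the "+ 1" below (17 pages with 4KiB pages rather than 16).
 */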
#define LZ4_MAX_DISTANCE_PAGES  (DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1)
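
/*
 * Minimum gap that must remain between the end of the compressed input and
 * the end of the output buffer for in-place decompression to be safe; this
 * is intended to match the in-place margin used by the upstream LZ4 library.
 */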
#ifndef LZ4_DECOMPRESS_INPLACE_MARGIN
#define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize)  (((srcsize) >> 8) + 32)
#endif

struct z_erofs_decompressor {
        /*
         * if destpages have sparse (missing) pages, fill them with
         * bounce pages. It also checks whether the destpages form a
         * physically contiguous region.
         */
        int (*prepare_destpages)(struct z_erofs_decompress_req *rq,
                                 struct list_head *pagepool);
        int (*decompress)(struct z_erofs_decompress_req *rq, u8 *out);
        char *name;
};

static bool use_vmap;
module_param(use_vmap, bool, 0444);
MODULE_PARM_DESC(use_vmap, "Use vmap() instead of vm_map_ram() (default 0)");

static int lz4_prepare_destpages(struct z_erofs_decompress_req *rq,
                                 struct list_head *pagepool)
{
        const unsigned int nr =
                PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
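        /*
         * "availables" is a small stack of bounce pages that have already
         * fallen out of the LZ4 lookback window and can therefore be reused;
         * "bounced" records, for each slot of that sliding window, whether
         * the page placed there one round ago was a bounce page.
         */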
        struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL };
        unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES,
                                           BITS_PER_LONG)] = { 0 };
        void *kaddr = NULL;
        unsigned int i, j, top;

        top = 0;
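        /*
         * Walk all output pages; j is the slot within the sliding window and
         * wraps around every LZ4_MAX_DISTANCE_PAGES pages.  Missing (NULL)
         * output pages are backed with bounce pages, while kaddr tracks
         * whether the pages seen so far are physically contiguous.
         */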
        for (i = j = 0; i < nr; ++i, ++j) {
                struct page *const page = rq->out[i];
                struct page *victim;

                if (j >= LZ4_MAX_DISTANCE_PAGES)
                        j = 0;

                /* 'valid' bounced can only be tested after a complete round */
                if (test_bit(j, bounced)) {
                        DBG_BUGON(i < LZ4_MAX_DISTANCE_PAGES);
                        DBG_BUGON(top >= LZ4_MAX_DISTANCE_PAGES);
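                        /*
                         * The bounce page placed here LZ4_MAX_DISTANCE_PAGES
                         * iterations ago is now beyond the maximum lookback
                         * distance and may safely be reused.
                         */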
                        availables[top++] = rq->out[i - LZ4_MAX_DISTANCE_PAGES];
                }
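
                /*
                 * A real output page exists in this slot: it is not a bounce
                 * page, so clear its mark and keep tracking physical
                 * contiguity via kaddr.
                 */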
                if (page) {
                        __clear_bit(j, bounced);
                        if (kaddr) {
                                if (kaddr + PAGE_SIZE == page_address(page))
                                        kaddr += PAGE_SIZE;
                                else
                                        kaddr = NULL;
                        } else if (!i) {
                                kaddr = page_address(page);
                        }
                        continue;
                }
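
                /* no output page here: mark the slot and back it with a bounce page */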
                kaddr = NULL;
                __set_bit(j, bounced);
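
                /*
                 * Prefer reusing a bounce page that has already left the
                 * lookback window; otherwise allocate a fresh staging page.
                 */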
                if (top) {
                        victim = availables[--top];
                        get_page(victim);
                } else {
                        victim = erofs_allocpage(pagepool, GFP_KERNEL, false);
                        if (!victim)
                                return -ENOMEM;
                        victim->mapping = Z_EROFS_MAPPING_STAGING;
                }
                rq->out[i] = victim;
        }
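        /* 1: the output pages are physically contiguous, 0: they need to be vmapped */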
        return kaddr ? 1 : 0;
}

static void *generic_copy_inplace_data(struct z_erofs_decompress_req *rq,
                                       u8 *src, unsigned int pageofs_in)
{
        /*
         * if in-place decompression is ongoing, the compressed data must be
         * copied into a temporary buffer first so that it is not overwritten
         * by the decompressed output sharing the same pages.
         */
        struct page **in = rq->in;
        u8 *const tmp = erofs_get_pcpubuf(0);
        u8 *tmpp = tmp;
        unsigned int inlen = rq->inputsize - pageofs_in;
        unsigned int count = min_t(uint, inlen, PAGE_SIZE - pageofs_in);

        while (tmpp < tmp + inlen) {
                if (!src)
                        src = kmap_atomic(*in);
                memcpy(tmpp, src + pageofs_in, count);
                kunmap_atomic(src);
                src = NULL;
                tmpp += count;
                pageofs_in = 0;
                count = PAGE_SIZE;
                ++in;
        }
        return tmp;
}

static int lz4_decompress(struct z_erofs_decompress_req *rq, u8 *out)
{
        unsigned int inputmargin, inlen;
        u8 *src;
        bool copied, support_0padding;
        int ret;

        if (rq->inputsize > PAGE_SIZE)
                return -EOPNOTSUPP;

        src = kmap_atomic(*rq->in);
        inputmargin = 0;
        support_0padding = false;

        /* decompression inplace is only safe when 0padding is enabled */
        if (EROFS_SB(rq->sb)->requirements & EROFS_REQUIREMENT_LZ4_0PADDING) {
                support_0padding = true;
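
                /*
                 * With 0padding, the compressed data is aligned to the end of
                 * the block and the front is zero-filled, so skip the leading
                 * zeroes to find where the LZ4 stream actually starts.
                 */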
                while (!src[inputmargin & ~PAGE_MASK])
                        if (!(++inputmargin & ~PAGE_MASK))
                                break;

                if (inputmargin >= rq->inputsize) {
                        kunmap_atomic(src);
                        return -EIO;
                }
        }
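
        /*
         * Copy the compressed data aside if in-place decompression cannot be
         * performed safely: partial decoding, no 0padding, the input page is
         * not the last output page, or not enough trailing margin.
         */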
        copied = false;
        inlen = rq->inputsize - inputmargin;
        if (rq->inplace_io) {
                const uint oend = (rq->pageofs_out +
                                   rq->outputsize) & ~PAGE_MASK;
                const uint nr = PAGE_ALIGN(rq->pageofs_out +
                                           rq->outputsize) >> PAGE_SHIFT;

                if (rq->partial_decoding || !support_0padding ||
                    rq->out[nr - 1] != rq->in[0] ||
                    rq->inputsize - oend <
                      LZ4_DECOMPRESS_INPLACE_MARGIN(inlen)) {
                        src = generic_copy_inplace_data(rq, src, inputmargin);
                        inputmargin = 0;
                        copied = true;
                }
        }
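
        /* decompress the stream; a negative return value means corrupted compressed data */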
        ret = LZ4_decompress_safe_partial(src + inputmargin, out,
                                          inlen, rq->outputsize,
                                          rq->outputsize);
        if (ret < 0) {
                errln("%s, failed to decompress, in[%p, %u, %u] out[%p, %u]",
                      __func__, src + inputmargin, inlen, inputmargin,
                      out, rq->outputsize);
                WARN_ON(1);
                print_hex_dump(KERN_DEBUG, "[ in]: ", DUMP_PREFIX_OFFSET,
                               16, 1, src + inputmargin, inlen, true);
                print_hex_dump(KERN_DEBUG, "[out]: ", DUMP_PREFIX_OFFSET,
                               16, 1, out, rq->outputsize, true);
                ret = -EIO;
        }
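
        /* src is either the per-CPU copy made above or a kmapped input page */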
        if (copied)
                erofs_put_pcpubuf(src);
        else
                kunmap_atomic(src);
        return ret;
}

static struct z_erofs_decompressor decompressors[] = {
        [Z_EROFS_COMPRESSION_SHIFTED] = {
                .name = "shifted"
        },
        [Z_EROFS_COMPRESSION_LZ4] = {
                .prepare_destpages = lz4_prepare_destpages,
                .decompress = lz4_decompress,
                .name = "lz4"
        },
};

static void copy_from_pcpubuf(struct page **out, const char *dst,
                              unsigned short pageofs_out,
                              unsigned int outputsize)
{
        const char *end = dst + outputsize;
        const unsigned int righthalf = PAGE_SIZE - pageofs_out;
        const char *cur = dst - pageofs_out;
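
        /*
         * Copy the decompressed data page by page: the first page only
         * receives the part after pageofs_out, later pages take full
         * PAGE_SIZE chunks (or whatever remains at the end).
         */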
        while (cur < end) {
                struct page *const page = *out++;

                if (page) {
                        char *buf = kmap_atomic(page);

                        if (cur >= dst) {
                                memcpy(buf, cur, min_t(uint, PAGE_SIZE,
                                                       end - cur));
                        } else {
                                memcpy(buf + pageofs_out, cur + pageofs_out,
                                       min_t(uint, righthalf, end - cur));
                        }
                        kunmap_atomic(buf);
                }
                cur += PAGE_SIZE;
        }
}

static void *erofs_vmap(struct page **pages, unsigned int count)
{
        int i = 0;

        if (use_vmap)
                return vmap(pages, count, VM_MAP, PAGE_KERNEL);
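
        /*
         * vm_map_ram() can fail when the vmap space is fragmented or
         * exhausted; flushing lazily-freed areas with vm_unmap_aliases()
         * and retrying makes that failure less likely.
         */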
        while (1) {
                void *addr = vm_map_ram(pages, count, -1, PAGE_KERNEL);

                /* retry two more times (3 tries in total) */
                if (addr || ++i >= 3)
                        return addr;
                vm_unmap_aliases();
        }
        return NULL;
}

static void erofs_vunmap(const void *mem, unsigned int count)
{
        if (!use_vmap)
                vm_unmap_ram(mem, count);
        else
                vunmap(mem);
}

static int decompress_generic(struct z_erofs_decompress_req *rq,
                              struct list_head *pagepool)
{
        const unsigned int nrpages_out =
                PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
        const struct z_erofs_decompressor *alg = decompressors + rq->alg;
        unsigned int dst_maptype;
        void *dst;
        int ret;
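
        /* a single output page without in-place I/O can be decompressed directly into its kmap */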
        if (nrpages_out == 1 && !rq->inplace_io) {
                DBG_BUGON(!*rq->out);
                dst = kmap_atomic(*rq->out);
                dst_maptype = 0;
                goto dstmap_out;
        }

        /*
         * For small output sizes (especially much less than PAGE_SIZE),
         * copying the decompressed data out of a per-CPU buffer is
         * preferable to copying the compressed data around.
         */
        if (rq->outputsize <= PAGE_SIZE * 7 / 8) {
                dst = erofs_get_pcpubuf(0);
                if (IS_ERR(dst))
                        return PTR_ERR(dst);

                rq->inplace_io = false;
                ret = alg->decompress(rq, dst);
                if (!ret)
                        copy_from_pcpubuf(rq->out, dst, rq->pageofs_out,
                                          rq->outputsize);

                erofs_put_pcpubuf(dst);
                return ret;
        }
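
        /*
         * Fill any missing output pages with bounce pages.  A return value
         * of 1 means the output is already physically contiguous and can be
         * addressed directly; otherwise it has to be vmapped.
         */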
        ret = alg->prepare_destpages(rq, pagepool);
        if (ret < 0) {
                return ret;
        } else if (ret) {
                dst = page_address(*rq->out);
                dst_maptype = 1;
                goto dstmap_out;
        }

        dst = erofs_vmap(rq->out, nrpages_out);
        if (!dst)
                return -ENOMEM;
        dst_maptype = 2;

dstmap_out:
        ret = alg->decompress(rq, dst + rq->pageofs_out);

        if (!dst_maptype)
                kunmap_atomic(dst);
        else if (dst_maptype == 2)
                erofs_vunmap(dst, nrpages_out);
        return ret;
}

static int shifted_decompress(const struct z_erofs_decompress_req *rq,
                              struct list_head *pagepool)
{
        const unsigned int nrpages_out =
                PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
        const unsigned int righthalf = PAGE_SIZE - rq->pageofs_out;
        unsigned char *src, *dst;
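
        /*
         * "shifted" blocks store the data as-is; decompression just copies
         * the single input page into at most two output pages, offset by
         * pageofs_out.
         */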
        if (nrpages_out > 2) {
                DBG_BUGON(1);
                return -EIO;
        }

        if (rq->out[0] == *rq->in) {
                DBG_BUGON(nrpages_out != 1);
                return 0;
        }
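
        /* the first righthalf bytes of the input land at the tail of the first output page */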
        src = kmap_atomic(*rq->in);
        if (!rq->out[0]) {
                dst = NULL;
        } else {
                dst = kmap_atomic(rq->out[0]);
                memcpy(dst + rq->pageofs_out, src, righthalf);
        }
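
        /* the remaining pageofs_out bytes belong at the head of the second output page */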
        if (rq->out[1] == *rq->in) {
                memmove(src, src + righthalf, rq->pageofs_out);
        } else if (nrpages_out == 2) {
                if (dst)
                        kunmap_atomic(dst);
                DBG_BUGON(!rq->out[1]);
                dst = kmap_atomic(rq->out[1]);
                memcpy(dst, src + righthalf, rq->pageofs_out);
        }
        if (dst)
                kunmap_atomic(dst);
        kunmap_atomic(src);
        return 0;
}

int z_erofs_decompress(struct z_erofs_decompress_req *rq,
                       struct list_head *pagepool)
{
        if (rq->alg == Z_EROFS_COMPRESSION_SHIFTED)
                return shifted_decompress(rq, pagepool);
        return decompress_generic(rq, pagepool);
}