Merge tag 'pstore-v4.20-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux

Pull pstore updates from Kees Cook:
 "pstore improvements:

   - refactor init to happen as early as possible again (Joel Fernandes)

   - improve resource reservation names"

* tag 'pstore-v4.20-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux:
  pstore/ram: Clarify resource reservation labels
  pstore: Refactor compression initialization
  pstore: Allocate compression during late_initcall()
  pstore: Centralize init/exit routines
Linus Torvalds 2018-10-24 14:42:02 +01:00
commit 08ffb584d9
6 changed files with 92 additions and 35 deletions
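
The "as early as possible" init refactor in the diffs below hangs on initcall ordering: ramoops_init() moves from late_initcall() to postcore_initcall() so the backend registers early, while a new pstore_init() at late_initcall() finishes compression and filesystem setup once the crypto API is available. A minimal sketch of that two-level pattern (illustrative only; the example_* functions are placeholders, not code from this series):

#include <linux/init.h>
#include <linux/printk.h>

/* Early: roughly where ramoops_init() now runs, registering the backend. */
static int __init example_backend_init(void)
{
	pr_info("example: backend registered at postcore time\n");
	return 0;
}
postcore_initcall(example_backend_init);

/* Late: roughly where pstore_init() runs; crypto is available by now. */
static int __init example_core_init(void)
{
	pr_info("example: compression and pstore filesystem set up\n");
	return 0;
}
late_initcall(example_core_init);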

fs/pstore/inode.c

@@ -482,12 +482,10 @@ static struct file_system_type pstore_fs_type = {
 	.kill_sb	= pstore_kill_sb,
 };
 
-static int __init init_pstore_fs(void)
+int __init pstore_init_fs(void)
 {
 	int err;
 
-	pstore_choose_compression();
-
 	/* Create a convenient mount point for people to access pstore */
 	err = sysfs_create_mount_point(fs_kobj, "pstore");
 	if (err)
@@ -500,14 +498,9 @@ static int __init init_pstore_fs(void)
 out:
 	return err;
 }
-module_init(init_pstore_fs)
 
-static void __exit exit_pstore_fs(void)
+void __exit pstore_exit_fs(void)
 {
 	unregister_filesystem(&pstore_fs_type);
 	sysfs_remove_mount_point(fs_kobj, "pstore");
 }
-module_exit(exit_pstore_fs)
-
-MODULE_AUTHOR("Tony Luck <tony.luck@intel.com>");
-MODULE_LICENSE("GPL");

fs/pstore/internal.h

@@ -37,7 +37,8 @@ extern bool pstore_is_mounted(void);
 extern void pstore_record_init(struct pstore_record *record,
 			       struct pstore_info *psi);
 
-/* Called during module_init() */
-extern void __init pstore_choose_compression(void);
+/* Called during pstore init/exit. */
+int __init pstore_init_fs(void);
+void __exit pstore_exit_fs(void);
 
 #endif

fs/pstore/platform.c

@@ -274,36 +274,56 @@ static int pstore_decompress(void *in, void *out,
 
 static void allocate_buf_for_compression(void)
 {
+	struct crypto_comp *ctx;
+	int size;
+	char *buf;
+
+	/* Skip if not built-in or compression backend not selected yet. */
 	if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS) || !zbackend)
 		return;
 
+	/* Skip if no pstore backend yet or compression init already done. */
+	if (!psinfo || tfm)
+		return;
+
 	if (!crypto_has_comp(zbackend->name, 0, 0)) {
-		pr_err("No %s compression\n", zbackend->name);
+		pr_err("Unknown compression: %s\n", zbackend->name);
 		return;
 	}
 
-	big_oops_buf_sz = zbackend->zbufsize(psinfo->bufsize);
-	if (big_oops_buf_sz <= 0)
+	size = zbackend->zbufsize(psinfo->bufsize);
+	if (size <= 0) {
+		pr_err("Invalid compression size for %s: %d\n",
+			zbackend->name, size);
 		return;
+	}
 
-	big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL);
-	if (!big_oops_buf) {
-		pr_err("allocate compression buffer error!\n");
+	buf = kmalloc(size, GFP_KERNEL);
+	if (!buf) {
+		pr_err("Failed %d byte compression buffer allocation for: %s\n",
+			size, zbackend->name);
 		return;
 	}
 
-	tfm = crypto_alloc_comp(zbackend->name, 0, 0);
-	if (IS_ERR_OR_NULL(tfm)) {
-		kfree(big_oops_buf);
-		big_oops_buf = NULL;
-		pr_err("crypto_alloc_comp() failed!\n");
+	ctx = crypto_alloc_comp(zbackend->name, 0, 0);
+	if (IS_ERR_OR_NULL(ctx)) {
+		kfree(buf);
+		pr_err("crypto_alloc_comp('%s') failed: %ld\n", zbackend->name,
+			PTR_ERR(ctx));
 		return;
 	}
+
+	/* A non-NULL big_oops_buf indicates compression is available. */
+	tfm = ctx;
+	big_oops_buf_sz = size;
+	big_oops_buf = buf;
+
+	pr_info("Using compression: %s\n", zbackend->name);
 }
 
 static void free_buf_for_compression(void)
 {
-	if (IS_ENABLED(CONFIG_PSTORE_COMPRESS) && !IS_ERR_OR_NULL(tfm))
+	if (IS_ENABLED(CONFIG_PSTORE_COMPRESS) && tfm)
 		crypto_free_comp(tfm);
 	kfree(big_oops_buf);
 	big_oops_buf = NULL;
@@ -774,14 +794,43 @@ void __init pstore_choose_compression(void)
 	for (step = zbackends; step->name; step++) {
 		if (!strcmp(compress, step->name)) {
 			zbackend = step;
-			pr_info("using %s compression\n", zbackend->name);
 			return;
 		}
 	}
 }
 
+static int __init pstore_init(void)
+{
+	int ret;
+
+	pstore_choose_compression();
+
+	/*
+	 * Check if any pstore backends registered earlier but did not
+	 * initialize compression because crypto was not ready. If so,
+	 * initialize compression now.
+	 */
+	allocate_buf_for_compression();
+
+	ret = pstore_init_fs();
+	if (ret)
+		return ret;
+
+	return 0;
+}
+late_initcall(pstore_init);
+
+static void __exit pstore_exit(void)
+{
+	pstore_exit_fs();
+}
+module_exit(pstore_exit)
+
 module_param(compress, charp, 0444);
 MODULE_PARM_DESC(compress, "Pstore compression to use");
 
 module_param(backend, charp, 0444);
 MODULE_PARM_DESC(backend, "Pstore backend to use");
+
+MODULE_AUTHOR("Tony Luck <tony.luck@intel.com>");
+MODULE_LICENSE("GPL");

fs/pstore/ram.c

@@ -587,9 +587,16 @@ static int ramoops_init_przs(const char *name,
 		goto fail;
 
 	for (i = 0; i < *cnt; i++) {
+		char *label;
+
+		if (*cnt == 1)
+			label = kasprintf(GFP_KERNEL, "ramoops:%s", name);
+		else
+			label = kasprintf(GFP_KERNEL, "ramoops:%s(%d/%d)",
+					  name, i, *cnt - 1);
 		prz_ar[i] = persistent_ram_new(*paddr, zone_sz, sig,
 					       &cxt->ecc_info,
-					       cxt->memtype, flags);
+					       cxt->memtype, flags, label);
 		if (IS_ERR(prz_ar[i])) {
 			err = PTR_ERR(prz_ar[i]);
 			dev_err(dev, "failed to request %s mem region (0x%zx@0x%llx): %d\n",
@@ -619,6 +626,8 @@ static int ramoops_init_prz(const char *name,
 			    struct persistent_ram_zone **prz,
 			    phys_addr_t *paddr, size_t sz, u32 sig)
 {
+	char *label;
+
 	if (!sz)
 		return 0;
 
@@ -629,8 +638,9 @@ static int ramoops_init_prz(const char *name,
 		return -ENOMEM;
 	}
 
+	label = kasprintf(GFP_KERNEL, "ramoops:%s", name);
 	*prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info,
-				  cxt->memtype, 0);
+				  cxt->memtype, 0, label);
 	if (IS_ERR(*prz)) {
 		int err = PTR_ERR(*prz);
 
@@ -962,7 +972,7 @@ static int __init ramoops_init(void)
 
 	return ret;
 }
-late_initcall(ramoops_init);
+postcore_initcall(ramoops_init);
 
 static void __exit ramoops_exit(void)
 {

fs/pstore/ram_core.c

@@ -438,11 +438,11 @@ static void *persistent_ram_vmap(phys_addr_t start, size_t size,
 }
 
 static void *persistent_ram_iomap(phys_addr_t start, size_t size,
-		unsigned int memtype)
+		unsigned int memtype, char *label)
 {
 	void *va;
 
-	if (!request_mem_region(start, size, "persistent_ram")) {
+	if (!request_mem_region(start, size, label ?: "ramoops")) {
 		pr_err("request mem region (0x%llx@0x%llx) failed\n",
 			(unsigned long long)size, (unsigned long long)start);
 		return NULL;
@@ -470,7 +470,8 @@ static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size,
 	if (pfn_valid(start >> PAGE_SHIFT))
 		prz->vaddr = persistent_ram_vmap(start, size, memtype);
 	else
-		prz->vaddr = persistent_ram_iomap(start, size, memtype);
+		prz->vaddr = persistent_ram_iomap(start, size, memtype,
+						  prz->label);
 
 	if (!prz->vaddr) {
 		pr_err("%s: Failed to map 0x%llx pages at 0x%llx\n", __func__,
@@ -541,12 +542,13 @@ void persistent_ram_free(struct persistent_ram_zone *prz)
 		prz->ecc_info.par = NULL;
 
 	persistent_ram_free_old(prz);
+	kfree(prz->label);
 	kfree(prz);
 }
 
 struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
 			u32 sig, struct persistent_ram_ecc_info *ecc_info,
-			unsigned int memtype, u32 flags)
+			unsigned int memtype, u32 flags, char *label)
 {
 	struct persistent_ram_zone *prz;
 	int ret = -ENOMEM;
@@ -560,6 +562,7 @@ struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
 	/* Initialize general buffer state. */
 	raw_spin_lock_init(&prz->buffer_lock);
 	prz->flags = flags;
+	prz->label = label;
 
 	ret = persistent_ram_buffer_map(start, size, prz, memtype);
 	if (ret)

include/linux/pstore_ram.h

@@ -46,6 +46,7 @@ struct persistent_ram_zone {
 	phys_addr_t paddr;
 	size_t size;
 	void *vaddr;
+	char *label;
 	struct persistent_ram_buffer *buffer;
 	size_t buffer_size;
 	u32 flags;
@@ -65,7 +66,7 @@ struct persistent_ram_zone {
 
 struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
 			u32 sig, struct persistent_ram_ecc_info *ecc_info,
-			unsigned int memtype, u32 flags);
+			unsigned int memtype, u32 flags, char *label);
 void persistent_ram_free(struct persistent_ram_zone *prz);
 void persistent_ram_zap(struct persistent_ram_zone *prz);
 
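
For the "improve resource reservation names" side, the new char *label argument to persistent_ram_new() is the string used when the region is reserved via request_mem_region() (visible in /proc/iomem), and persistent_ram_free() now kfree()s it, so ownership of the kasprintf() allocation passes to the zone. A hedged sketch of a hypothetical caller using the updated signature (example_register_zone() and struct example_ctx are made up for illustration):

/* Hypothetical caller: reserve one zone with a descriptive label. */
static int example_register_zone(struct example_ctx *cxt,
				 phys_addr_t paddr, size_t sz)
{
	struct persistent_ram_zone *prz;
	char *label;

	/* The zone takes ownership; persistent_ram_free() kfree()s the label. */
	label = kasprintf(GFP_KERNEL, "ramoops:%s", "example");

	prz = persistent_ram_new(paddr, sz, 0, &cxt->ecc_info,
				 cxt->memtype, 0, label);
	if (IS_ERR(prz))
		return PTR_ERR(prz);

	cxt->prz = prz;
	return 0;
}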