[PATCH] slab: remove kmem_cache_t

Replace all uses of kmem_cache_t with struct kmem_cache.

The patch was generated using the following script:

	#!/bin/sh
	#
	# Replace one string by another in all the kernel sources.
	#

	set -e

	for file in `find * -name "*.c" -o -name "*.h"|xargs grep -l $1`; do
		quilt add $file
		sed -e "1,\$s/$1/$2/g" $file >/tmp/$$
		mv /tmp/$$ $file
		quilt refresh
	done

The script was run like this:

	sh replace kmem_cache_t "struct kmem_cache"
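
An illustrative before/after of the substitution (the names "foo_cache"
and "foo_ctor" are made up here for illustration; the hunks below show
the real instances):

	-static kmem_cache_t *foo_cache;
	-static void foo_ctor(void *obj, kmem_cache_t *cachep, unsigned long flags);
	+static struct kmem_cache *foo_cache;
	+static void foo_ctor(void *obj, struct kmem_cache *cachep, unsigned long flags);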

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit e18b890bb0 (parent 441e143e95)
Author: Christoph Lameter
Date: 2006-12-06 20:33:20 -08:00
Committer: Linus Torvalds
189 changed files with 332 additions and 332 deletions


@ -77,7 +77,7 @@ To get this part of the dma_ API, you must #include <linux/dmapool.h>
Many drivers need lots of small dma-coherent memory regions for DMA
descriptors or I/O buffers. Rather than allocating in units of a page
or more using dma_alloc_coherent(), you can use DMA pools. These work
much like a kmem_cache_t, except that they use the dma-coherent allocator
much like a struct kmem_cache, except that they use the dma-coherent allocator
not __get_free_pages(). Also, they understand common hardware constraints
for alignment, like queue heads needing to be aligned on N byte boundaries.
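
For reference, a minimal sketch of the dma_pool calls this text describes
(the pool name, block size, alignment, and the "dev" pointer below are
illustrative placeholders, not taken from any driver in this patch):

	#include <linux/dmapool.h>

	static int mydev_test_pool(struct device *dev)
	{
		struct dma_pool *pool;
		dma_addr_t dma;
		void *desc;

		/* 64-byte blocks, 16-byte aligned, no boundary restriction */
		pool = dma_pool_create("mydev-desc", dev, 64, 16, 0);
		if (!pool)
			return -ENOMEM;

		/* returns a CPU pointer and fills in the bus address "dma" */
		desc = dma_pool_alloc(pool, GFP_KERNEL, &dma);
		if (!desc) {
			dma_pool_destroy(pool);
			return -ENOMEM;
		}

		dma_pool_free(pool, desc, dma);
		dma_pool_destroy(pool);
		return 0;
	}
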
@ -94,7 +94,7 @@ The pool create() routines initialize a pool of dma-coherent buffers
for use with a given device. It must be called in a context which
can sleep.
The "name" is for diagnostics (like a kmem_cache_t name); dev and size
The "name" is for diagnostics (like a struct kmem_cache name); dev and size
are like what you'd pass to dma_alloc_coherent(). The device's hardware
alignment requirement for this type of data is "align" (which is expressed
in bytes, and must be a power of two). If your device has no boundary


@ -40,7 +40,7 @@
/* io map for dma */
static void __iomem *dma_base;
static kmem_cache_t *dma_kmem;
static struct kmem_cache *dma_kmem;
struct s3c24xx_dma_selection dma_sel;
@ -1271,7 +1271,7 @@ struct sysdev_class dma_sysclass = {
/* kmem cache implementation */
static void s3c2410_dma_cache_ctor(void *p, kmem_cache_t *c, unsigned long f)
static void s3c2410_dma_cache_ctor(void *p, struct kmem_cache *c, unsigned long f)
{
memset(p, 0, sizeof(struct s3c2410_dma_buf));
}
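
A constructor with this signature is wired up when the cache is created.
As a minimal sketch, dma_kmem above might be set up with the era's
six-argument kmem_cache_create() like this; the cache name, alignment,
and flags are illustrative, not copied from the driver:

	dma_kmem = kmem_cache_create("s3c2410_dma_bufs",
			sizeof(struct s3c2410_dma_buf),
			0, SLAB_HWCACHE_ALIGN,
			s3c2410_dma_cache_ctor, NULL);
	if (dma_kmem == NULL)
		return -ENOMEM;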


@ -24,7 +24,7 @@
#define MEMC_TABLE_SIZE (256*sizeof(unsigned long))
kmem_cache_t *pte_cache, *pgd_cache;
struct kmem_cache *pte_cache, *pgd_cache;
int page_nr;
/*
@ -162,12 +162,12 @@ void __init create_memmap_holes(struct meminfo *mi)
{
}
static void pte_cache_ctor(void *pte, kmem_cache_t *cache, unsigned long flags)
static void pte_cache_ctor(void *pte, struct kmem_cache *cache, unsigned long flags)
{
memzero(pte, sizeof(pte_t) * PTRS_PER_PTE);
}
static void pgd_cache_ctor(void *pgd, kmem_cache_t *cache, unsigned long flags)
static void pgd_cache_ctor(void *pgd, struct kmem_cache *cache, unsigned long flags)
{
memzero(pgd + MEMC_TABLE_SIZE, USER_PTRS_PER_PGD * sizeof(pgd_t));
}


@ -18,7 +18,7 @@
#include <asm/cacheflush.h>
pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((aligned(PAGE_SIZE)));
kmem_cache_t *pgd_cache;
struct kmem_cache *pgd_cache;
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
@ -100,7 +100,7 @@ static inline void pgd_list_del(pgd_t *pgd)
set_page_private(next, (unsigned long) pprev);
}
void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused)
{
unsigned long flags;
@ -120,7 +120,7 @@ void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
}
/* never called when PTRS_PER_PMD > 1 */
void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
void pgd_dtor(void *pgd, struct kmem_cache *cache, unsigned long unused)
{
unsigned long flags; /* can be called from interrupt context */


@ -699,8 +699,8 @@ int remove_memory(u64 start, u64 size)
#endif
#endif
kmem_cache_t *pgd_cache;
kmem_cache_t *pmd_cache;
struct kmem_cache *pgd_cache;
struct kmem_cache *pmd_cache;
void __init pgtable_cache_init(void)
{


@ -193,7 +193,7 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
return pte;
}
void pmd_ctor(void *pmd, kmem_cache_t *cache, unsigned long flags)
void pmd_ctor(void *pmd, struct kmem_cache *cache, unsigned long flags)
{
memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
}
@ -233,7 +233,7 @@ static inline void pgd_list_del(pgd_t *pgd)
set_page_private(next, (unsigned long)pprev);
}
void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused)
{
unsigned long flags;
@ -253,7 +253,7 @@ void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
}
/* never called when PTRS_PER_PMD > 1 */
void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
void pgd_dtor(void *pgd, struct kmem_cache *cache, unsigned long unused)
{
unsigned long flags; /* can be called from interrupt context */


@ -249,7 +249,7 @@ ia32_init (void)
#if PAGE_SHIFT > IA32_PAGE_SHIFT
{
extern kmem_cache_t *partial_page_cachep;
extern struct kmem_cache *partial_page_cachep;
partial_page_cachep = kmem_cache_create("partial_page_cache",
sizeof(struct partial_page), 0, 0,


@ -254,7 +254,7 @@ mmap_subpage (struct file *file, unsigned long start, unsigned long end, int pro
}
/* SLAB cache for partial_page structures */
kmem_cache_t *partial_page_cachep;
struct kmem_cache *partial_page_cachep;
/*
* init partial_page_list.


@ -101,7 +101,7 @@ struct flash_block_list_header { /* just the header of flash_block_list */
static struct flash_block_list_header rtas_firmware_flash_list = {0, NULL};
/* Use slab cache to guarantee 4k alignment */
static kmem_cache_t *flash_block_cache = NULL;
static struct kmem_cache *flash_block_cache = NULL;
#define FLASH_BLOCK_LIST_VERSION (1UL)
@ -286,7 +286,7 @@ static ssize_t rtas_flash_read(struct file *file, char __user *buf,
}
/* constructor for flash_block_cache */
void rtas_block_ctor(void *ptr, kmem_cache_t *cache, unsigned long flags)
void rtas_block_ctor(void *ptr, struct kmem_cache *cache, unsigned long flags)
{
memset(ptr, 0, RTAS_BLK_SIZE);
}


@ -1047,7 +1047,7 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
return err;
}
static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
static void zero_ctor(void *addr, struct kmem_cache *cache, unsigned long flags)
{
memset(addr, 0, kmem_cache_size(cache));
}


@ -141,7 +141,7 @@ static int __init setup_kcore(void)
}
module_init(setup_kcore);
static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
static void zero_ctor(void *addr, struct kmem_cache *cache, unsigned long flags)
{
memset(addr, 0, kmem_cache_size(cache));
}
@ -166,9 +166,9 @@ static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
/* Hugepages need one extra cache, initialized in hugetlbpage.c. We
* can't put into the tables above, because HPAGE_SHIFT is not compile
* time constant. */
kmem_cache_t *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)+1];
struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)+1];
#else
kmem_cache_t *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
struct kmem_cache *pgtable_cache[ARRAY_SIZE(pgtable_cache_size)];
#endif
void pgtable_cache_init(void)


@ -40,7 +40,7 @@
#include "spufs.h"
static kmem_cache_t *spufs_inode_cache;
static struct kmem_cache *spufs_inode_cache;
char *isolated_loader;
static struct inode *
@ -65,7 +65,7 @@ spufs_destroy_inode(struct inode *inode)
}
static void
spufs_init_once(void *p, kmem_cache_t * cachep, unsigned long flags)
spufs_init_once(void *p, struct kmem_cache * cachep, unsigned long flags)
{
struct spufs_inode_info *ei = p;


@ -38,7 +38,7 @@ struct sq_mapping {
static struct sq_mapping *sq_mapping_list;
static DEFINE_SPINLOCK(sq_mapping_lock);
static kmem_cache_t *sq_cache;
static struct kmem_cache *sq_cache;
static unsigned long *sq_bitmap;
#define store_queue_barrier() \


@ -30,7 +30,7 @@
#define NR_PMB_ENTRIES 16
static kmem_cache_t *pmb_cache;
static struct kmem_cache *pmb_cache;
static unsigned long pmb_map;
static struct pmb_entry pmb_init_map[] = {
@ -283,7 +283,7 @@ void pmb_unmap(unsigned long addr)
} while (pmbe);
}
static void pmb_cache_ctor(void *pmb, kmem_cache_t *cachep, unsigned long flags)
static void pmb_cache_ctor(void *pmb, struct kmem_cache *cachep, unsigned long flags)
{
struct pmb_entry *pmbe = pmb;
@ -297,7 +297,7 @@ static void pmb_cache_ctor(void *pmb, kmem_cache_t *cachep, unsigned long flags)
spin_unlock_irq(&pmb_list_lock);
}
static void pmb_cache_dtor(void *pmb, kmem_cache_t *cachep, unsigned long flags)
static void pmb_cache_dtor(void *pmb, struct kmem_cache *cachep, unsigned long flags)
{
spin_lock_irq(&pmb_list_lock);
pmb_list_del(pmb);


@ -176,9 +176,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
int bigkernel = 0;
kmem_cache_t *pgtable_cache __read_mostly;
struct kmem_cache *pgtable_cache __read_mostly;
static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
static void zero_ctor(void *addr, struct kmem_cache *cache, unsigned long flags)
{
clear_page(addr);
}


@ -239,7 +239,7 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign
}
}
static kmem_cache_t *tsb_caches[8] __read_mostly;
static struct kmem_cache *tsb_caches[8] __read_mostly;
static const char *tsb_cache_names[8] = {
"tsb_8KB",


@ -43,8 +43,8 @@ static int cfq_slice_idle = HZ / 125;
#define RQ_CIC(rq) ((struct cfq_io_context*)(rq)->elevator_private)
#define RQ_CFQQ(rq) ((rq)->elevator_private2)
static kmem_cache_t *cfq_pool;
static kmem_cache_t *cfq_ioc_pool;
static struct kmem_cache *cfq_pool;
static struct kmem_cache *cfq_ioc_pool;
static DEFINE_PER_CPU(unsigned long, ioc_count);
static struct completion *ioc_gone;


@ -44,17 +44,17 @@ static struct io_context *current_io_context(gfp_t gfp_flags, int node);
/*
* For the allocated request tables
*/
static kmem_cache_t *request_cachep;
static struct kmem_cache *request_cachep;
/*
* For queue allocation
*/
static kmem_cache_t *requestq_cachep;
static struct kmem_cache *requestq_cachep;
/*
* For io context allocations
*/
static kmem_cache_t *iocontext_cachep;
static struct kmem_cache *iocontext_cachep;
/*
* Controlling structure to kblockd


@ -12,7 +12,7 @@
#include <linux/netdevice.h>
#include "aoe.h"
static kmem_cache_t *buf_pool_cache;
static struct kmem_cache *buf_pool_cache;
static ssize_t aoedisk_show_state(struct gendisk * disk, char *page)
{


@ -133,7 +133,7 @@ struct eth1394_node_info {
#define ETH1394_DRIVER_NAME "eth1394"
static const char driver_name[] = ETH1394_DRIVER_NAME;
static kmem_cache_t *packet_task_cache;
static struct kmem_cache *packet_task_cache;
static struct hpsb_highlevel eth1394_highlevel;


@ -101,7 +101,7 @@ struct crypt_config {
#define MIN_POOL_PAGES 32
#define MIN_BIO_PAGES 8
static kmem_cache_t *_crypt_io_pool;
static struct kmem_cache *_crypt_io_pool;
/*
* Different IV generation algorithms:


@ -101,7 +101,7 @@ typedef int (*action_fn) (struct pgpath *pgpath);
#define MIN_IOS 256 /* Mempool size */
static kmem_cache_t *_mpio_cache;
static struct kmem_cache *_mpio_cache;
struct workqueue_struct *kmultipathd;
static void process_queued_ios(struct work_struct *work);


@ -88,8 +88,8 @@ struct pending_exception {
* Hash table mapping origin volumes to lists of snapshots and
* a lock to protect it
*/
static kmem_cache_t *exception_cache;
static kmem_cache_t *pending_cache;
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;
static mempool_t *pending_pool;
/*
@ -228,7 +228,7 @@ static int init_exception_table(struct exception_table *et, uint32_t size)
return 0;
}
static void exit_exception_table(struct exception_table *et, kmem_cache_t *mem)
static void exit_exception_table(struct exception_table *et, struct kmem_cache *mem)
{
struct list_head *slot;
struct exception *ex, *next;


@ -121,8 +121,8 @@ struct mapped_device {
};
#define MIN_IOS 256
static kmem_cache_t *_io_cache;
static kmem_cache_t *_tio_cache;
static struct kmem_cache *_io_cache;
static struct kmem_cache *_tio_cache;
static int __init local_init(void)
{


@ -203,7 +203,7 @@ struct kcopyd_job {
/* FIXME: this should scale with the number of pages */
#define MIN_JOBS 512
static kmem_cache_t *_job_cache;
static struct kmem_cache *_job_cache;
static mempool_t *_job_pool;
/*


@ -348,7 +348,7 @@ static int grow_one_stripe(raid5_conf_t *conf)
static int grow_stripes(raid5_conf_t *conf, int num)
{
kmem_cache_t *sc;
struct kmem_cache *sc;
int devs = conf->raid_disks;
sprintf(conf->cache_name[0], "raid5/%s", mdname(conf->mddev));
@ -397,7 +397,7 @@ static int resize_stripes(raid5_conf_t *conf, int newsize)
LIST_HEAD(newstripes);
struct disk_info *ndisks;
int err = 0;
kmem_cache_t *sc;
struct kmem_cache *sc;
int i;
if (newsize <= conf->pool_size)


@ -64,7 +64,7 @@
/* I2O Block OSM mempool struct */
struct i2o_block_mempool {
kmem_cache_t *slab;
struct kmem_cache *slab;
mempool_t *pool;
};


@ -26,7 +26,7 @@
static DEFINE_SPINLOCK(msi_lock);
static struct msi_desc* msi_desc[NR_IRQS] = { [0 ... NR_IRQS-1] = NULL };
static kmem_cache_t* msi_cachep;
static struct kmem_cache* msi_cachep;
static int pci_msi_enable = 1;


@ -25,7 +25,7 @@
#include "dasd_int.h"
kmem_cache_t *dasd_page_cache;
struct kmem_cache *dasd_page_cache;
EXPORT_SYMBOL_GPL(dasd_page_cache);
/*


@ -474,7 +474,7 @@ extern struct dasd_profile_info_t dasd_global_profile;
extern unsigned int dasd_profile_level;
extern struct block_device_operations dasd_device_operations;
extern kmem_cache_t *dasd_page_cache;
extern struct kmem_cache *dasd_page_cache;
struct dasd_ccw_req *
dasd_kmalloc_request(char *, int, int, struct dasd_device *);


@ -1032,9 +1032,9 @@ struct zfcp_data {
wwn_t init_wwpn;
fcp_lun_t init_fcp_lun;
char *driver_version;
kmem_cache_t *fsf_req_qtcb_cache;
kmem_cache_t *sr_buffer_cache;
kmem_cache_t *gid_pn_cache;
struct kmem_cache *fsf_req_qtcb_cache;
struct kmem_cache *sr_buffer_cache;
struct kmem_cache *gid_pn_cache;
};
/**


@ -56,8 +56,8 @@
/* 2*ITNL timeout + 1 second */
#define AIC94XX_SCB_TIMEOUT (5*HZ)
extern kmem_cache_t *asd_dma_token_cache;
extern kmem_cache_t *asd_ascb_cache;
extern struct kmem_cache *asd_dma_token_cache;
extern struct kmem_cache *asd_ascb_cache;
extern char sas_addr_str[2*SAS_ADDR_SIZE + 1];
static inline void asd_stringify_sas_addr(char *p, const u8 *sas_addr)


@ -1047,7 +1047,7 @@ irqreturn_t asd_hw_isr(int irq, void *dev_id)
static inline struct asd_ascb *asd_ascb_alloc(struct asd_ha_struct *asd_ha,
gfp_t gfp_flags)
{
extern kmem_cache_t *asd_ascb_cache;
extern struct kmem_cache *asd_ascb_cache;
struct asd_seq_data *seq = &asd_ha->seq;
struct asd_ascb *ascb;
unsigned long flags;


@ -450,8 +450,8 @@ static inline void asd_destroy_ha_caches(struct asd_ha_struct *asd_ha)
asd_ha->scb_pool = NULL;
}
kmem_cache_t *asd_dma_token_cache;
kmem_cache_t *asd_ascb_cache;
struct kmem_cache *asd_dma_token_cache;
struct kmem_cache *asd_ascb_cache;
static int asd_create_global_caches(void)
{


@ -36,7 +36,7 @@
#include "../scsi_sas_internal.h"
kmem_cache_t *sas_task_cache;
struct kmem_cache *sas_task_cache;
/*------------ SAS addr hash -----------*/
void sas_hash_addr(u8 *hashed, const u8 *sas_addr)


@ -24,7 +24,7 @@ char qla2x00_version_str[40];
/*
* SRB allocation cache
*/
static kmem_cache_t *srb_cachep;
static struct kmem_cache *srb_cachep;
/*
* Ioctl related information.


@ -19,7 +19,7 @@ char qla4xxx_version_str[40];
/*
* SRB allocation cache
*/
static kmem_cache_t *srb_cachep;
static struct kmem_cache *srb_cachep;
/*
* Module parameter information and variables


@ -136,7 +136,7 @@ const char * scsi_device_type(unsigned type)
EXPORT_SYMBOL(scsi_device_type);
struct scsi_host_cmd_pool {
kmem_cache_t *slab;
struct kmem_cache *slab;
unsigned int users;
char *name;
unsigned int slab_flags;


@ -36,7 +36,7 @@
struct scsi_host_sg_pool {
size_t size;
char *name;
kmem_cache_t *slab;
struct kmem_cache *slab;
mempool_t *pool;
};
@ -241,7 +241,7 @@ struct scsi_io_context {
char sense[SCSI_SENSE_BUFFERSIZE];
};
static kmem_cache_t *scsi_io_context_cache;
static struct kmem_cache *scsi_io_context_cache;
static void scsi_end_async(struct request *req, int uptodate)
{


@ -33,7 +33,7 @@
#include "scsi_tgt_priv.h"
static struct workqueue_struct *scsi_tgtd;
static kmem_cache_t *scsi_tgt_cmd_cache;
static struct kmem_cache *scsi_tgt_cmd_cache;
/*
* TODO: this struct will be killed when the block layer supports large bios


@ -275,13 +275,13 @@ static volatile USB_SB_Desc_t TxIntrSB_zout __attribute__ ((aligned (4)));
static int zout_buffer[4] __attribute__ ((aligned (4)));
/* Cache for allocating new EP and SB descriptors. */
static kmem_cache_t *usb_desc_cache;
static struct kmem_cache *usb_desc_cache;
/* Cache for the registers allocated in the top half. */
static kmem_cache_t *top_half_reg_cache;
static struct kmem_cache *top_half_reg_cache;
/* Cache for the data allocated in the isoc descr top half. */
static kmem_cache_t *isoc_compl_cache;
static struct kmem_cache *isoc_compl_cache;
static struct usb_bus *etrax_usb_bus;


@ -81,7 +81,7 @@ MODULE_PARM_DESC(debug, "Debug level");
static char *errbuf;
#define ERRBUF_LEN (32 * 1024)
static kmem_cache_t *uhci_up_cachep; /* urb_priv */
static struct kmem_cache *uhci_up_cachep; /* urb_priv */
static void suspend_rh(struct uhci_hcd *uhci, enum uhci_rh_state new_state);
static void wakeup_rh(struct uhci_hcd *uhci);


@ -50,7 +50,7 @@ struct mon_event_text {
#define SLAB_NAME_SZ 30
struct mon_reader_text {
kmem_cache_t *e_slab;
struct kmem_cache *e_slab;
int nevents;
struct list_head e_list;
struct mon_reader r; /* In C, parent class can be placed anywhere */
@ -63,7 +63,7 @@ struct mon_reader_text {
char slab_name[SLAB_NAME_SZ];
};
static void mon_text_ctor(void *, kmem_cache_t *, unsigned long);
static void mon_text_ctor(void *, struct kmem_cache *, unsigned long);
/*
* mon_text_submit
@ -450,7 +450,7 @@ const struct file_operations mon_fops_text = {
/*
* Slab interface: constructor.
*/
static void mon_text_ctor(void *mem, kmem_cache_t *slab, unsigned long sflags)
static void mon_text_ctor(void *mem, struct kmem_cache *slab, unsigned long sflags)
{
/*
* Nothing to initialize. No, really!


@ -212,7 +212,7 @@ static int adfs_statfs(struct dentry *dentry, struct kstatfs *buf)
return 0;
}
static kmem_cache_t *adfs_inode_cachep;
static struct kmem_cache *adfs_inode_cachep;
static struct inode *adfs_alloc_inode(struct super_block *sb)
{
@ -228,7 +228,7 @@ static void adfs_destroy_inode(struct inode *inode)
kmem_cache_free(adfs_inode_cachep, ADFS_I(inode));
}
static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
{
struct adfs_inode_info *ei = (struct adfs_inode_info *) foo;


@ -66,7 +66,7 @@ affs_write_super(struct super_block *sb)
pr_debug("AFFS: write_super() at %lu, clean=%d\n", get_seconds(), clean);
}
static kmem_cache_t * affs_inode_cachep;
static struct kmem_cache * affs_inode_cachep;
static struct inode *affs_alloc_inode(struct super_block *sb)
{
@ -83,7 +83,7 @@ static void affs_destroy_inode(struct inode *inode)
kmem_cache_free(affs_inode_cachep, AFFS_I(inode));
}
static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
{
struct affs_inode_info *ei = (struct affs_inode_info *) foo;


@ -35,7 +35,7 @@ struct afs_mount_params {
struct afs_volume *volume;
};
static void afs_i_init_once(void *foo, kmem_cache_t *cachep,
static void afs_i_init_once(void *foo, struct kmem_cache *cachep,
unsigned long flags);
static int afs_get_sb(struct file_system_type *fs_type,
@ -65,7 +65,7 @@ static struct super_operations afs_super_ops = {
.put_super = afs_put_super,
};
static kmem_cache_t *afs_inode_cachep;
static struct kmem_cache *afs_inode_cachep;
static atomic_t afs_count_active_inodes;
/*****************************************************************************/
@ -384,7 +384,7 @@ static void afs_put_super(struct super_block *sb)
/*
* initialise an inode cache slab element prior to any use
*/
static void afs_i_init_once(void *_vnode, kmem_cache_t *cachep,
static void afs_i_init_once(void *_vnode, struct kmem_cache *cachep,
unsigned long flags)
{
struct afs_vnode *vnode = (struct afs_vnode *) _vnode;


@ -47,8 +47,8 @@ unsigned long aio_nr; /* current system wide number of aio requests */
unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
/*----end sysctl variables---*/
static kmem_cache_t *kiocb_cachep;
static kmem_cache_t *kioctx_cachep;
static struct kmem_cache *kiocb_cachep;
static struct kmem_cache *kioctx_cachep;
static struct workqueue_struct *aio_wq;


@ -61,7 +61,7 @@ static const struct super_operations befs_sops = {
};
/* slab cache for befs_inode_info objects */
static kmem_cache_t *befs_inode_cachep;
static struct kmem_cache *befs_inode_cachep;
static const struct file_operations befs_dir_operations = {
.read = generic_read_dir,
@ -289,7 +289,7 @@ befs_destroy_inode(struct inode *inode)
kmem_cache_free(befs_inode_cachep, BEFS_I(inode));
}
static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
{
struct befs_inode_info *bi = (struct befs_inode_info *) foo;


@ -228,7 +228,7 @@ static void bfs_write_super(struct super_block *s)
unlock_kernel();
}
static kmem_cache_t * bfs_inode_cachep;
static struct kmem_cache * bfs_inode_cachep;
static struct inode *bfs_alloc_inode(struct super_block *sb)
{
@ -244,7 +244,7 @@ static void bfs_destroy_inode(struct inode *inode)
kmem_cache_free(bfs_inode_cachep, BFS_I(inode));
}
static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
{
struct bfs_inode_info *bi = foo;


@ -30,7 +30,7 @@
#define BIO_POOL_SIZE 256
static kmem_cache_t *bio_slab __read_mostly;
static struct kmem_cache *bio_slab __read_mostly;
#define BIOVEC_NR_POOLS 6
@ -44,7 +44,7 @@ mempool_t *bio_split_pool __read_mostly;
struct biovec_slab {
int nr_vecs;
char *name;
kmem_cache_t *slab;
struct kmem_cache *slab;
};
/*


@ -235,7 +235,7 @@ static int block_fsync(struct file *filp, struct dentry *dentry, int datasync)
*/
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock);
static kmem_cache_t * bdev_cachep __read_mostly;
static struct kmem_cache * bdev_cachep __read_mostly;
static struct inode *bdev_alloc_inode(struct super_block *sb)
{
@ -253,7 +253,7 @@ static void bdev_destroy_inode(struct inode *inode)
kmem_cache_free(bdev_cachep, bdi);
}
static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
{
struct bdev_inode *ei = (struct bdev_inode *) foo;
struct block_device *bdev = &ei->bdev;


@ -2908,7 +2908,7 @@ asmlinkage long sys_bdflush(int func, long data)
/*
* Buffer-head allocation
*/
static kmem_cache_t *bh_cachep;
static struct kmem_cache *bh_cachep;
/*
* Once the number of bh's in the machine exceeds this level, we start
@ -2961,7 +2961,7 @@ void free_buffer_head(struct buffer_head *bh)
EXPORT_SYMBOL(free_buffer_head);
static void
init_buffer_head(void *data, kmem_cache_t *cachep, unsigned long flags)
init_buffer_head(void *data, struct kmem_cache *cachep, unsigned long flags)
{
if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
SLAB_CTOR_CONSTRUCTOR) {


@ -81,7 +81,7 @@ extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
extern mempool_t *cifs_mid_poolp;
extern kmem_cache_t *cifs_oplock_cachep;
extern struct kmem_cache *cifs_oplock_cachep;
static int
cifs_read_super(struct super_block *sb, void *data,
@ -232,11 +232,11 @@ static int cifs_permission(struct inode * inode, int mask, struct nameidata *nd)
return generic_permission(inode, mask, NULL);
}
static kmem_cache_t *cifs_inode_cachep;
static kmem_cache_t *cifs_req_cachep;
static kmem_cache_t *cifs_mid_cachep;
kmem_cache_t *cifs_oplock_cachep;
static kmem_cache_t *cifs_sm_req_cachep;
static struct kmem_cache *cifs_inode_cachep;
static struct kmem_cache *cifs_req_cachep;
static struct kmem_cache *cifs_mid_cachep;
struct kmem_cache *cifs_oplock_cachep;
static struct kmem_cache *cifs_sm_req_cachep;
mempool_t *cifs_sm_req_poolp;
mempool_t *cifs_req_poolp;
mempool_t *cifs_mid_poolp;
@ -668,7 +668,7 @@ const struct file_operations cifs_dir_ops = {
};
static void
cifs_init_once(void *inode, kmem_cache_t * cachep, unsigned long flags)
cifs_init_once(void *inode, struct kmem_cache * cachep, unsigned long flags)
{
struct cifsInodeInfo *cifsi = inode;


@ -34,7 +34,7 @@
#include "cifs_debug.h"
extern mempool_t *cifs_mid_poolp;
extern kmem_cache_t *cifs_oplock_cachep;
extern struct kmem_cache *cifs_oplock_cachep;
static struct mid_q_entry *
AllocMidQEntry(const struct smb_hdr *smb_buffer, struct cifsSesInfo *ses)


@ -38,7 +38,7 @@ static void coda_clear_inode(struct inode *);
static void coda_put_super(struct super_block *);
static int coda_statfs(struct dentry *dentry, struct kstatfs *buf);
static kmem_cache_t * coda_inode_cachep;
static struct kmem_cache * coda_inode_cachep;
static struct inode *coda_alloc_inode(struct super_block *sb)
{
@ -58,7 +58,7 @@ static void coda_destroy_inode(struct inode *inode)
kmem_cache_free(coda_inode_cachep, ITOC(inode));
}
static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
{
struct coda_inode_info *ei = (struct coda_inode_info *) foo;


@ -49,7 +49,7 @@ struct configfs_dirent {
#define CONFIGFS_NOT_PINNED (CONFIGFS_ITEM_ATTR)
extern struct vfsmount * configfs_mount;
extern kmem_cache_t *configfs_dir_cachep;
extern struct kmem_cache *configfs_dir_cachep;
extern int configfs_is_root(struct config_item *item);


@ -38,7 +38,7 @@
struct vfsmount * configfs_mount = NULL;
struct super_block * configfs_sb = NULL;
kmem_cache_t *configfs_dir_cachep;
struct kmem_cache *configfs_dir_cachep;
static int configfs_mnt_count = 0;
static struct super_operations configfs_ops = {


@ -43,7 +43,7 @@ static __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
EXPORT_SYMBOL(dcache_lock);
static kmem_cache_t *dentry_cache __read_mostly;
static struct kmem_cache *dentry_cache __read_mostly;
#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
@ -2072,10 +2072,10 @@ static void __init dcache_init(unsigned long mempages)
}
/* SLAB cache for __getname() consumers */
kmem_cache_t *names_cachep __read_mostly;
struct kmem_cache *names_cachep __read_mostly;
/* SLAB cache for file structures */
kmem_cache_t *filp_cachep __read_mostly;
struct kmem_cache *filp_cachep __read_mostly;
EXPORT_SYMBOL(d_genocide);


@ -37,7 +37,7 @@ struct dcookie_struct {
static LIST_HEAD(dcookie_users);
static DEFINE_MUTEX(dcookie_mutex);
static kmem_cache_t *dcookie_cache __read_mostly;
static struct kmem_cache *dcookie_cache __read_mostly;
static struct list_head *dcookie_hashtable __read_mostly;
static size_t hash_size __read_mostly;


@ -15,7 +15,7 @@
#include "config.h"
#include "memory.h"
static kmem_cache_t *lkb_cache;
static struct kmem_cache *lkb_cache;
int dlm_memory_init(void)


@ -23,7 +23,7 @@
int dir_notify_enable __read_mostly = 1;
static kmem_cache_t *dn_cache __read_mostly;
static struct kmem_cache *dn_cache __read_mostly;
static void redo_inode_mask(struct inode *inode)
{


@ -131,7 +131,7 @@ static struct quota_format_type *quota_formats; /* List of registered formats */
static struct quota_module_name module_names[] = INIT_QUOTA_MODULE_NAMES;
/* SLAB cache for dquot structures */
static kmem_cache_t *dquot_cachep;
static struct kmem_cache *dquot_cachep;
int register_quota_format(struct quota_format_type *fmt)
{


@ -546,7 +546,7 @@ inode_info_init_once(void *vptr, struct kmem_cache *cachep, unsigned long flags)
}
static struct ecryptfs_cache_info {
kmem_cache_t **cache;
struct kmem_cache **cache;
const char *name;
size_t size;
void (*ctor)(void*, struct kmem_cache *, unsigned long);


@ -52,7 +52,7 @@ static struct pt_types sgi_pt_types[] = {
};
static kmem_cache_t * efs_inode_cachep;
static struct kmem_cache * efs_inode_cachep;
static struct inode *efs_alloc_inode(struct super_block *sb)
{
@ -68,7 +68,7 @@ static void efs_destroy_inode(struct inode *inode)
kmem_cache_free(efs_inode_cachep, INODE_INFO(inode));
}
static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
{
struct efs_inode_info *ei = (struct efs_inode_info *) foo;


@ -283,10 +283,10 @@ static struct mutex epmutex;
static struct poll_safewake psw;
/* Slab cache used to allocate "struct epitem" */
static kmem_cache_t *epi_cache __read_mostly;
static struct kmem_cache *epi_cache __read_mostly;
/* Slab cache used to allocate "struct eppoll_entry" */
static kmem_cache_t *pwq_cache __read_mostly;
static struct kmem_cache *pwq_cache __read_mostly;
/* Virtual fs used to allocate inodes for eventpoll files */
static struct vfsmount *eventpoll_mnt __read_mostly;


@ -135,7 +135,7 @@ static void ext2_put_super (struct super_block * sb)
return;
}
static kmem_cache_t * ext2_inode_cachep;
static struct kmem_cache * ext2_inode_cachep;
static struct inode *ext2_alloc_inode(struct super_block *sb)
{
@ -156,7 +156,7 @@ static void ext2_destroy_inode(struct inode *inode)
kmem_cache_free(ext2_inode_cachep, EXT2_I(inode));
}
static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
{
struct ext2_inode_info *ei = (struct ext2_inode_info *) foo;


@ -436,7 +436,7 @@ static void ext3_put_super (struct super_block * sb)
return;
}
static kmem_cache_t *ext3_inode_cachep;
static struct kmem_cache *ext3_inode_cachep;
/*
* Called inside transaction, so use GFP_NOFS
@ -462,7 +462,7 @@ static void ext3_destroy_inode(struct inode *inode)
kmem_cache_free(ext3_inode_cachep, EXT3_I(inode));
}
static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
{
struct ext3_inode_info *ei = (struct ext3_inode_info *) foo;


@ -486,7 +486,7 @@ static void ext4_put_super (struct super_block * sb)
return;
}
static kmem_cache_t *ext4_inode_cachep;
static struct kmem_cache *ext4_inode_cachep;
/*
* Called inside transaction, so use GFP_NOFS
@ -513,7 +513,7 @@ static void ext4_destroy_inode(struct inode *inode)
kmem_cache_free(ext4_inode_cachep, EXT4_I(inode));
}
static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
{
struct ext4_inode_info *ei = (struct ext4_inode_info *) foo;


@ -34,9 +34,9 @@ static inline int fat_max_cache(struct inode *inode)
return FAT_MAX_CACHE;
}
static kmem_cache_t *fat_cache_cachep;
static struct kmem_cache *fat_cache_cachep;
static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
{
struct fat_cache *cache = (struct fat_cache *)foo;


@ -477,7 +477,7 @@ static void fat_put_super(struct super_block *sb)
kfree(sbi);
}
static kmem_cache_t *fat_inode_cachep;
static struct kmem_cache *fat_inode_cachep;
static struct inode *fat_alloc_inode(struct super_block *sb)
{
@ -493,7 +493,7 @@ static void fat_destroy_inode(struct inode *inode)
kmem_cache_free(fat_inode_cachep, MSDOS_I(inode));
}
static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
{
struct msdos_inode_info *ei = (struct msdos_inode_info *)foo;


@ -553,7 +553,7 @@ int send_sigurg(struct fown_struct *fown)
}
static DEFINE_RWLOCK(fasync_lock);
static kmem_cache_t *fasync_cache __read_mostly;
static struct kmem_cache *fasync_cache __read_mostly;
/*
* fasync_helper() is used by some character device drivers (mainly mice)


@ -46,7 +46,7 @@ extern const struct address_space_operations vxfs_immed_aops;
extern struct inode_operations vxfs_immed_symlink_iops;
kmem_cache_t *vxfs_inode_cachep;
struct kmem_cache *vxfs_inode_cachep;
#ifdef DIAGNOSTIC


@ -19,7 +19,7 @@
MODULE_ALIAS_MISCDEV(FUSE_MINOR);
static kmem_cache_t *fuse_req_cachep;
static struct kmem_cache *fuse_req_cachep;
static struct fuse_conn *fuse_get_conn(struct file *file)
{


@ -22,7 +22,7 @@ MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>");
MODULE_DESCRIPTION("Filesystem in Userspace");
MODULE_LICENSE("GPL");
static kmem_cache_t *fuse_inode_cachep;
static struct kmem_cache *fuse_inode_cachep;
struct list_head fuse_conn_list;
DEFINE_MUTEX(fuse_mutex);
@ -601,7 +601,7 @@ static struct file_system_type fuse_fs_type = {
static decl_subsys(fuse, NULL, NULL);
static decl_subsys(connections, NULL, NULL);
static void fuse_inode_init_once(void *foo, kmem_cache_t *cachep,
static void fuse_inode_init_once(void *foo, struct kmem_cache *cachep,
unsigned long flags)
{
struct inode * inode = foo;


@ -25,7 +25,7 @@
#include "util.h"
#include "glock.h"
static void gfs2_init_inode_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
static void gfs2_init_inode_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
{
struct gfs2_inode *ip = foo;
if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
@ -37,7 +37,7 @@ static void gfs2_init_inode_once(void *foo, kmem_cache_t *cachep, unsigned long
}
}
static void gfs2_init_glock_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
static void gfs2_init_glock_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
{
struct gfs2_glock *gl = foo;
if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==


@ -23,9 +23,9 @@
#include "lm.h"
#include "util.h"
kmem_cache_t *gfs2_glock_cachep __read_mostly;
kmem_cache_t *gfs2_inode_cachep __read_mostly;
kmem_cache_t *gfs2_bufdata_cachep __read_mostly;
struct kmem_cache *gfs2_glock_cachep __read_mostly;
struct kmem_cache *gfs2_inode_cachep __read_mostly;
struct kmem_cache *gfs2_bufdata_cachep __read_mostly;
void gfs2_assert_i(struct gfs2_sbd *sdp)
{


@ -146,9 +146,9 @@ int gfs2_io_error_bh_i(struct gfs2_sbd *sdp, struct buffer_head *bh,
gfs2_io_error_bh_i((sdp), (bh), __FUNCTION__, __FILE__, __LINE__);
extern kmem_cache_t *gfs2_glock_cachep;
extern kmem_cache_t *gfs2_inode_cachep;
extern kmem_cache_t *gfs2_bufdata_cachep;
extern struct kmem_cache *gfs2_glock_cachep;
extern struct kmem_cache *gfs2_inode_cachep;
extern struct kmem_cache *gfs2_bufdata_cachep;
static inline unsigned int gfs2_tune_get_i(struct gfs2_tune *gt,
unsigned int *p)


@ -24,7 +24,7 @@
#include "hfs_fs.h"
#include "btree.h"
static kmem_cache_t *hfs_inode_cachep;
static struct kmem_cache *hfs_inode_cachep;
MODULE_LICENSE("GPL");
@ -430,7 +430,7 @@ static struct file_system_type hfs_fs_type = {
.fs_flags = FS_REQUIRES_DEV,
};
static void hfs_init_once(void *p, kmem_cache_t *cachep, unsigned long flags)
static void hfs_init_once(void *p, struct kmem_cache *cachep, unsigned long flags)
{
struct hfs_inode_info *i = p;


@ -434,7 +434,7 @@ MODULE_AUTHOR("Brad Boyer");
MODULE_DESCRIPTION("Extended Macintosh Filesystem");
MODULE_LICENSE("GPL");
static kmem_cache_t *hfsplus_inode_cachep;
static struct kmem_cache *hfsplus_inode_cachep;
static struct inode *hfsplus_alloc_inode(struct super_block *sb)
{
@ -467,7 +467,7 @@ static struct file_system_type hfsplus_fs_type = {
.fs_flags = FS_REQUIRES_DEV,
};
static void hfsplus_init_once(void *p, kmem_cache_t *cachep, unsigned long flags)
static void hfsplus_init_once(void *p, struct kmem_cache *cachep, unsigned long flags)
{
struct hfsplus_inode_info *i = p;


@ -160,7 +160,7 @@ static int hpfs_statfs(struct dentry *dentry, struct kstatfs *buf)
return 0;
}
static kmem_cache_t * hpfs_inode_cachep;
static struct kmem_cache * hpfs_inode_cachep;
static struct inode *hpfs_alloc_inode(struct super_block *sb)
{
@ -177,7 +177,7 @@ static void hpfs_destroy_inode(struct inode *inode)
kmem_cache_free(hpfs_inode_cachep, hpfs_i(inode));
}
static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
{
struct hpfs_inode_info *ei = (struct hpfs_inode_info *) foo;


@ -513,7 +513,7 @@ static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
}
static kmem_cache_t *hugetlbfs_inode_cachep;
static struct kmem_cache *hugetlbfs_inode_cachep;
static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
@ -545,7 +545,7 @@ static const struct address_space_operations hugetlbfs_aops = {
};
static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
{
struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;


@ -97,7 +97,7 @@ static DEFINE_MUTEX(iprune_mutex);
*/
struct inodes_stat_t inodes_stat;
static kmem_cache_t * inode_cachep __read_mostly;
static struct kmem_cache * inode_cachep __read_mostly;
static struct inode *alloc_inode(struct super_block *sb)
{
@ -209,7 +209,7 @@ void inode_init_once(struct inode *inode)
EXPORT_SYMBOL(inode_init_once);
static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
{
struct inode * inode = (struct inode *) foo;


@ -34,8 +34,8 @@
#include <asm/ioctls.h>
static kmem_cache_t *watch_cachep __read_mostly;
static kmem_cache_t *event_cachep __read_mostly;
static struct kmem_cache *watch_cachep __read_mostly;
static struct kmem_cache *event_cachep __read_mostly;
static struct vfsmount *inotify_mnt __read_mostly;


@ -57,7 +57,7 @@ static void isofs_put_super(struct super_block *sb)
static void isofs_read_inode(struct inode *);
static int isofs_statfs (struct dentry *, struct kstatfs *);
static kmem_cache_t *isofs_inode_cachep;
static struct kmem_cache *isofs_inode_cachep;
static struct inode *isofs_alloc_inode(struct super_block *sb)
{
@ -73,7 +73,7 @@ static void isofs_destroy_inode(struct inode *inode)
kmem_cache_free(isofs_inode_cachep, ISOFS_I(inode));
}
static void init_once(void *foo, kmem_cache_t * cachep, unsigned long flags)
static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags)
{
struct iso_inode_info *ei = foo;


@ -1630,7 +1630,7 @@ void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry)
#define JBD_MAX_SLABS 5
#define JBD_SLAB_INDEX(size) (size >> 11)
static kmem_cache_t *jbd_slab[JBD_MAX_SLABS];
static struct kmem_cache *jbd_slab[JBD_MAX_SLABS];
static const char *jbd_slab_names[JBD_MAX_SLABS] = {
"jbd_1k", "jbd_2k", "jbd_4k", NULL, "jbd_8k"
};
@ -1693,7 +1693,7 @@ void jbd_slab_free(void *ptr, size_t size)
/*
* Journal_head storage management
*/
static kmem_cache_t *journal_head_cache;
static struct kmem_cache *journal_head_cache;
#ifdef CONFIG_JBD_DEBUG
static atomic_t nr_journal_heads = ATOMIC_INIT(0);
#endif
@ -1996,7 +1996,7 @@ static void __exit remove_jbd_proc_entry(void)
#endif
kmem_cache_t *jbd_handle_cache;
struct kmem_cache *jbd_handle_cache;
static int __init journal_init_handle_cache(void)
{


@ -70,8 +70,8 @@
#include <linux/init.h>
#endif
static kmem_cache_t *revoke_record_cache;
static kmem_cache_t *revoke_table_cache;
static struct kmem_cache *revoke_record_cache;
static struct kmem_cache *revoke_table_cache;
/* Each revoke record represents one single revoked block. During
journal replay, this involves recording the transaction ID of the


@ -1641,7 +1641,7 @@ void * __jbd2_kmalloc (const char *where, size_t size, gfp_t flags, int retry)
#define JBD_MAX_SLABS 5
#define JBD_SLAB_INDEX(size) (size >> 11)
static kmem_cache_t *jbd_slab[JBD_MAX_SLABS];
static struct kmem_cache *jbd_slab[JBD_MAX_SLABS];
static const char *jbd_slab_names[JBD_MAX_SLABS] = {
"jbd2_1k", "jbd2_2k", "jbd2_4k", NULL, "jbd2_8k"
};
@ -1704,7 +1704,7 @@ void jbd2_slab_free(void *ptr, size_t size)
/*
* Journal_head storage management
*/
static kmem_cache_t *jbd2_journal_head_cache;
static struct kmem_cache *jbd2_journal_head_cache;
#ifdef CONFIG_JBD_DEBUG
static atomic_t nr_journal_heads = ATOMIC_INIT(0);
#endif
@ -2007,7 +2007,7 @@ static void __exit jbd2_remove_jbd_proc_entry(void)
#endif
kmem_cache_t *jbd2_handle_cache;
struct kmem_cache *jbd2_handle_cache;
static int __init journal_init_handle_cache(void)
{


@ -70,8 +70,8 @@
#include <linux/init.h>
#endif
static kmem_cache_t *jbd2_revoke_record_cache;
static kmem_cache_t *jbd2_revoke_table_cache;
static struct kmem_cache *jbd2_revoke_record_cache;
static struct kmem_cache *jbd2_revoke_table_cache;
/* Each revoke record represents one single revoked block. During
journal replay, this involves recording the transaction ID of the


@ -61,8 +61,8 @@ static const struct file_operations jffs_dir_operations;
static struct inode_operations jffs_dir_inode_operations;
static const struct address_space_operations jffs_address_operations;
kmem_cache_t *node_cache = NULL;
kmem_cache_t *fm_cache = NULL;
struct kmem_cache *node_cache = NULL;
struct kmem_cache *fm_cache = NULL;
/* Called by the VFS at mount time to initialize the whole file system. */
static int jffs_fill_super(struct super_block *sb, void *data, int silent)


@ -29,8 +29,8 @@ static int jffs_mark_obsolete(struct jffs_fmcontrol *fmc, __u32 fm_offset);
static struct jffs_fm *jffs_alloc_fm(void);
static void jffs_free_fm(struct jffs_fm *n);
extern kmem_cache_t *fm_cache;
extern kmem_cache_t *node_cache;
extern struct kmem_cache *fm_cache;
extern struct kmem_cache *node_cache;
#if CONFIG_JFFS_FS_VERBOSE > 0
void


@ -19,16 +19,16 @@
/* These are initialised to NULL in the kernel startup code.
If you're porting to other operating systems, beware */
static kmem_cache_t *full_dnode_slab;
static kmem_cache_t *raw_dirent_slab;
static kmem_cache_t *raw_inode_slab;
static kmem_cache_t *tmp_dnode_info_slab;
static kmem_cache_t *raw_node_ref_slab;
static kmem_cache_t *node_frag_slab;
static kmem_cache_t *inode_cache_slab;
static struct kmem_cache *full_dnode_slab;
static struct kmem_cache *raw_dirent_slab;
static struct kmem_cache *raw_inode_slab;
static struct kmem_cache *tmp_dnode_info_slab;
static struct kmem_cache *raw_node_ref_slab;
static struct kmem_cache *node_frag_slab;
static struct kmem_cache *inode_cache_slab;
#ifdef CONFIG_JFFS2_FS_XATTR
static kmem_cache_t *xattr_datum_cache;
static kmem_cache_t *xattr_ref_cache;
static struct kmem_cache *xattr_datum_cache;
static struct kmem_cache *xattr_ref_cache;
#endif
int __init jffs2_create_slab_caches(void)


@ -28,7 +28,7 @@
static void jffs2_put_super(struct super_block *);
static kmem_cache_t *jffs2_inode_cachep;
static struct kmem_cache *jffs2_inode_cachep;
static struct inode *jffs2_alloc_inode(struct super_block *sb)
{
@ -44,7 +44,7 @@ static void jffs2_destroy_inode(struct inode *inode)
kmem_cache_free(jffs2_inode_cachep, JFFS2_INODE_INFO(inode));
}
static void jffs2_i_init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
static void jffs2_i_init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
{
struct jffs2_inode_info *ei = (struct jffs2_inode_info *) foo;


@ -74,7 +74,7 @@ static inline void lock_metapage(struct metapage *mp)
}
#define METAPOOL_MIN_PAGES 32
static kmem_cache_t *metapage_cache;
static struct kmem_cache *metapage_cache;
static mempool_t *metapage_mempool;
#define MPS_PER_PAGE (PAGE_CACHE_SIZE >> L2PSIZE)
@ -180,7 +180,7 @@ static inline void remove_metapage(struct page *page, struct metapage *mp)
#endif
static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
static void init_once(void *foo, struct kmem_cache *cachep, unsigned long flags)
{
struct metapage *mp = (struct metapage *)foo;


@ -44,7 +44,7 @@ MODULE_DESCRIPTION("The Journaled Filesystem (JFS)");
MODULE_AUTHOR("Steve Best/Dave Kleikamp/Barry Arndt, IBM");
MODULE_LICENSE("GPL");
static kmem_cache_t * jfs_inode_cachep;
static struct kmem_cache * jfs_inode_cachep;
static struct super_operations jfs_super_operations;
static struct export_operations jfs_export_operations;
@ -748,7 +748,7 @@ static struct file_system_type jfs_fs_type = {
.fs_flags = FS_REQUIRES_DEV,
};
static void init_once(void *foo, kmem_cache_t * cachep, unsigned long flags)
static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags)
{
struct jfs_inode_info *jfs_ip = (struct jfs_inode_info *) foo;


@ -142,7 +142,7 @@ int lease_break_time = 45;
static LIST_HEAD(file_lock_list);
static LIST_HEAD(blocked_list);
static kmem_cache_t *filelock_cache __read_mostly;
static struct kmem_cache *filelock_cache __read_mostly;
/* Allocate an empty lock structure. */
static struct file_lock *locks_alloc_lock(void)
@ -199,7 +199,7 @@ EXPORT_SYMBOL(locks_init_lock);
* Initialises the fields of the file lock which are invariant for
* free file_locks.
*/
static void init_once(void *foo, kmem_cache_t *cache, unsigned long flags)
static void init_once(void *foo, struct kmem_cache *cache, unsigned long flags)
{
struct file_lock *lock = (struct file_lock *) foo;


@ -85,7 +85,7 @@ struct mb_cache {
#ifndef MB_CACHE_INDEXES_COUNT
int c_indexes_count;
#endif
kmem_cache_t *c_entry_cache;
struct kmem_cache *c_entry_cache;
struct list_head *c_block_hash;
struct list_head *c_indexes_hash[0];
};


@ -51,7 +51,7 @@ static void minix_put_super(struct super_block *sb)
return;
}
static kmem_cache_t * minix_inode_cachep;
static struct kmem_cache * minix_inode_cachep;
static struct inode *minix_alloc_inode(struct super_block *sb)
{
@ -67,7 +67,7 @@ static void minix_destroy_inode(struct inode *inode)
kmem_cache_free(minix_inode_cachep, minix_i(inode));
}
static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
{
struct minix_inode_info *ei = (struct minix_inode_info *) foo;


@ -36,7 +36,7 @@ static int event;
static struct list_head *mount_hashtable __read_mostly;
static int hash_mask __read_mostly, hash_bits __read_mostly;
static kmem_cache_t *mnt_cache __read_mostly;
static struct kmem_cache *mnt_cache __read_mostly;
static struct rw_semaphore namespace_sem;
/* /sys/fs */


@ -40,7 +40,7 @@ static void ncp_delete_inode(struct inode *);
static void ncp_put_super(struct super_block *);
static int ncp_statfs(struct dentry *, struct kstatfs *);
static kmem_cache_t * ncp_inode_cachep;
static struct kmem_cache * ncp_inode_cachep;
static struct inode *ncp_alloc_inode(struct super_block *sb)
{
@ -56,7 +56,7 @@ static void ncp_destroy_inode(struct inode *inode)
kmem_cache_free(ncp_inode_cachep, NCP_FINFO(inode));
}
static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
static void init_once(void * foo, struct kmem_cache * cachep, unsigned long flags)
{
struct ncp_inode_info *ei = (struct ncp_inode_info *) foo;


@ -58,7 +58,7 @@
#define NFSDBG_FACILITY NFSDBG_VFS
static kmem_cache_t *nfs_direct_cachep;
static struct kmem_cache *nfs_direct_cachep;
/*
* This represents a set of asynchronous requests that we're waiting on

Some files were not shown because too many files have changed in this diff.