[PATCH] gfp flags annotations - part 1
- added typedef unsigned int __nocast gfp_t;
- replaced __nocast uses for gfp flags with gfp_t - it gives exactly the same warnings as far as sparse is concerned, doesn't change generated code (from gcc's point of view we replaced unsigned int with a typedef) and documents what's going on far better.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 3b0e77bd14
commit dd0fc66fb3
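Before the diff, a minimal sketch (not part of the patch) of the mechanism the message describes. The __CHECKER__ guard follows the kernel's usual convention for sparse-only attributes; toy_alloc() is a hypothetical function, used here only to show the kind of misuse sparse can flag once gfp flags carry their own type. Under plain gcc the attribute expands to nothing, so gfp_t is exactly unsigned int and code generation is unchanged - which is the commit's point.

/* Sketch only -- not from this patch. */
#ifdef __CHECKER__
# define __nocast __attribute__((nocast))
#else
# define __nocast
#endif

typedef unsigned int __nocast gfp_t;    /* the typedef the patch adds */

/* Hypothetical allocator, for illustration. */
extern void *toy_alloc(unsigned long size, gfp_t flags);

void example(void)
{
        gfp_t flags = (gfp_t)0;         /* explicit casts remain legal */
        unsigned int plain = 0;

        toy_alloc(64, flags);           /* fine: types match */
        toy_alloc(64, plain);           /* sparse warns: implicit cast to nocast type */
}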
@@ -24,7 +24,7 @@ struct dma_coherent_mem {
 };
 
 void *dma_alloc_coherent(struct device *dev, size_t size,
-                dma_addr_t *dma_handle, unsigned int __nocast gfp)
+                dma_addr_t *dma_handle, gfp_t gfp)
 {
        void *ret;
        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
@@ -23,7 +23,7 @@ struct dma_coherent_mem {
 };
 
 void *dma_alloc_coherent(struct device *dev, size_t size,
-                dma_addr_t *dma_handle, unsigned int __nocast gfp)
+                dma_addr_t *dma_handle, gfp_t gfp)
 {
        void *ret;
        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
@@ -310,7 +310,7 @@ static void bpa_map_iommu(void)
 
 
 static void *bpa_alloc_coherent(struct device *hwdev, size_t size,
-                dma_addr_t *dma_handle, unsigned int __nocast flag)
+                dma_addr_t *dma_handle, gfp_t flag)
 {
        void *ret;
 
@@ -53,7 +53,7 @@ int dma_set_mask(struct device *dev, u64 dma_mask)
 EXPORT_SYMBOL(dma_set_mask);
 
 void *dma_alloc_coherent(struct device *dev, size_t size,
-                dma_addr_t *dma_handle, unsigned int __nocast flag)
+                dma_addr_t *dma_handle, gfp_t flag)
 {
        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
@@ -519,7 +519,7 @@ void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
  * to the dma address (mapping) of the first page.
  */
 void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
-                dma_addr_t *dma_handle, unsigned int __nocast flag)
+                dma_addr_t *dma_handle, gfp_t flag)
 {
        void *ret = NULL;
        dma_addr_t mapping;
@@ -31,7 +31,7 @@
 #include "pci.h"
 
 static void *pci_direct_alloc_coherent(struct device *hwdev, size_t size,
-                dma_addr_t *dma_handle, unsigned int __nocast flag)
+                dma_addr_t *dma_handle, gfp_t flag)
 {
        void *ret;
 
@@ -76,7 +76,7 @@ static inline struct iommu_table *devnode_table(struct device *dev)
  * to the dma address (mapping) of the first page.
  */
 static void *pci_iommu_alloc_coherent(struct device *hwdev, size_t size,
-                dma_addr_t *dma_handle, unsigned int __nocast flag)
+                dma_addr_t *dma_handle, gfp_t flag)
 {
        return iommu_alloc_coherent(devnode_table(hwdev), size, dma_handle,
                        flag);
@@ -218,7 +218,7 @@ static void vio_unmap_sg(struct device *dev, struct scatterlist *sglist,
 }
 
 static void *vio_alloc_coherent(struct device *dev, size_t size,
-                dma_addr_t *dma_handle, unsigned int __nocast flag)
+                dma_addr_t *dma_handle, gfp_t flag)
 {
        return iommu_alloc_coherent(to_vio_dev(dev)->iommu_table, size,
                        dma_handle, flag);
@@ -795,7 +795,7 @@ static void drain_rx_pools (amb_dev * dev) {
 }
 
 static inline void fill_rx_pool (amb_dev * dev, unsigned char pool,
-                unsigned int __nocast priority)
+                gfp_t priority)
 {
        rx_in rx;
        amb_rxq * rxq;
@@ -1374,8 +1374,7 @@ static void reset_chip (struct fs_dev *dev)
        }
 }
 
-static void __devinit *aligned_kmalloc (int size, unsigned int __nocast flags,
-                int alignment)
+static void __devinit *aligned_kmalloc (int size, gfp_t flags, int alignment)
 {
        void *t;
 
@@ -1466,7 +1465,7 @@ static inline int nr_buffers_in_freepool (struct fs_dev *dev, struct freepool *f
    working again after that... -- REW */
 
 static void top_off_fp (struct fs_dev *dev, struct freepool *fp,
-                unsigned int __nocast gfp_flags)
+                gfp_t gfp_flags)
 {
        struct FS_BPENTRY *qe, *ne;
        struct sk_buff *skb;
@@ -178,7 +178,7 @@ fore200e_irq_itoa(int irq)
 
 
 static void*
-fore200e_kmalloc(int size, unsigned int __nocast flags)
+fore200e_kmalloc(int size, gfp_t flags)
 {
        void *chunk = kzalloc(size, flags);
 
@@ -156,7 +156,7 @@ dma_pool_create (const char *name, struct device *dev,
 
 
 static struct dma_page *
-pool_alloc_page (struct dma_pool *pool, unsigned int __nocast mem_flags)
+pool_alloc_page (struct dma_pool *pool, gfp_t mem_flags)
 {
        struct dma_page *page;
        int mapsize;
@@ -262,8 +262,7 @@ dma_pool_destroy (struct dma_pool *pool)
  * If such a memory block can't be allocated, null is returned.
  */
 void *
-dma_pool_alloc (struct dma_pool *pool, unsigned int __nocast mem_flags,
-                dma_addr_t *handle)
+dma_pool_alloc (struct dma_pool *pool, gfp_t mem_flags, dma_addr_t *handle)
 {
        unsigned long flags;
        struct dma_page *page;
@@ -229,7 +229,7 @@ static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets)
        return 1;
 }
 
-static void *pkt_rb_alloc(unsigned int __nocast gfp_mask, void *data)
+static void *pkt_rb_alloc(gfp_t gfp_mask, void *data)
 {
        return kmalloc(sizeof(struct pkt_rb_node), gfp_mask);
 }
@@ -2082,7 +2082,7 @@ static int pkt_close(struct inode *inode, struct file *file)
 }
 
 
-static void *psd_pool_alloc(unsigned int __nocast gfp_mask, void *data)
+static void *psd_pool_alloc(gfp_t gfp_mask, void *data)
 {
        return kmalloc(sizeof(struct packet_stacked_data), gfp_mask);
 }
@@ -308,7 +308,7 @@ static void bpa10x_complete(struct urb *urb, struct pt_regs *regs)
 }
 
 static inline struct urb *bpa10x_alloc_urb(struct usb_device *udev, unsigned int pipe,
-                size_t size, unsigned int __nocast flags, void *data)
+                size_t size, gfp_t flags, void *data)
 {
        struct urb *urb;
        struct usb_ctrlrequest *cr;
@@ -132,7 +132,7 @@ static struct usb_device_id blacklist_ids[] = {
        { }     /* Terminating entry */
 };
 
-static struct _urb *_urb_alloc(int isoc, unsigned int __nocast gfp)
+static struct _urb *_urb_alloc(int isoc, gfp_t gfp)
 {
        struct _urb *_urb = kmalloc(sizeof(struct _urb) +
                        sizeof(struct usb_iso_packet_descriptor) * isoc, gfp);
@@ -69,8 +69,7 @@ int cn_already_initialized = 0;
  * a new message.
  *
  */
-int cn_netlink_send(struct cn_msg *msg, u32 __group,
-                unsigned int __nocast gfp_mask)
+int cn_netlink_send(struct cn_msg *msg, u32 __group, gfp_t gfp_mask)
 {
        struct cn_callback_entry *__cbq;
        unsigned int size;
@@ -98,7 +98,7 @@ static struct hpsb_address_ops arm_ops = {
 
 static void queue_complete_cb(struct pending_request *req);
 
-static struct pending_request *__alloc_pending_request(unsigned int __nocast flags)
+static struct pending_request *__alloc_pending_request(gfp_t flags)
 {
        struct pending_request *req;
 
@@ -783,7 +783,7 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
                u32 remote_qpn, u16 pkey_index,
                struct ib_ah *ah, int rmpp_active,
                int hdr_len, int data_len,
-                unsigned int __nocast gfp_mask)
+                gfp_t gfp_mask)
 {
        struct ib_mad_agent_private *mad_agent_priv;
        struct ib_mad_send_buf *send_buf;
@@ -574,7 +574,7 @@ static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
 int ib_sa_path_rec_get(struct ib_device *device, u8 port_num,
                struct ib_sa_path_rec *rec,
                ib_sa_comp_mask comp_mask,
-                int timeout_ms, unsigned int __nocast gfp_mask,
+                int timeout_ms, gfp_t gfp_mask,
                void (*callback)(int status,
                                struct ib_sa_path_rec *resp,
                                void *context),
@@ -676,7 +676,7 @@ static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
 int ib_sa_service_rec_query(struct ib_device *device, u8 port_num, u8 method,
                struct ib_sa_service_rec *rec,
                ib_sa_comp_mask comp_mask,
-                int timeout_ms, unsigned int __nocast gfp_mask,
+                int timeout_ms, gfp_t gfp_mask,
                void (*callback)(int status,
                                struct ib_sa_service_rec *resp,
                                void *context),
@@ -759,7 +759,7 @@ int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num,
                u8 method,
                struct ib_sa_mcmember_rec *rec,
                ib_sa_comp_mask comp_mask,
-                int timeout_ms, unsigned int __nocast gfp_mask,
+                int timeout_ms, gfp_t gfp_mask,
                void (*callback)(int status,
                                struct ib_sa_mcmember_rec *resp,
                                void *context),
@@ -96,7 +96,7 @@ static kmem_cache_t *_crypt_io_pool;
 /*
  * Mempool alloc and free functions for the page
  */
-static void *mempool_alloc_page(unsigned int __nocast gfp_mask, void *data)
+static void *mempool_alloc_page(gfp_t gfp_mask, void *data)
 {
        return alloc_page(gfp_mask);
 }
@@ -32,7 +32,7 @@ struct io {
 static unsigned _num_ios;
 static mempool_t *_io_pool;
 
-static void *alloc_io(unsigned int __nocast gfp_mask, void *pool_data)
+static void *alloc_io(gfp_t gfp_mask, void *pool_data)
 {
        return kmalloc(sizeof(struct io), gfp_mask);
 }
@@ -122,7 +122,7 @@ static inline sector_t region_to_sector(struct region_hash *rh, region_t region)
 /* FIXME move this */
 static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw);
 
-static void *region_alloc(unsigned int __nocast gfp_mask, void *pool_data)
+static void *region_alloc(gfp_t gfp_mask, void *pool_data)
 {
        return kmalloc(sizeof(struct region), gfp_mask);
 }
@@ -38,7 +38,7 @@
 static mdk_personality_t multipath_personality;
 
 
-static void *mp_pool_alloc(unsigned int __nocast gfp_flags, void *data)
+static void *mp_pool_alloc(gfp_t gfp_flags, void *data)
 {
        struct multipath_bh *mpb;
        mpb = kmalloc(sizeof(*mpb), gfp_flags);
@@ -52,7 +52,7 @@ static mdk_personality_t raid1_personality;
 static void unplug_slaves(mddev_t *mddev);
 
 
-static void * r1bio_pool_alloc(unsigned int __nocast gfp_flags, void *data)
+static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
 {
        struct pool_info *pi = data;
        r1bio_t *r1_bio;
@@ -79,7 +79,7 @@ static void r1bio_pool_free(void *r1_bio, void *data)
 #define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
 #define RESYNC_WINDOW (2048*1024)
 
-static void * r1buf_pool_alloc(unsigned int __nocast gfp_flags, void *data)
+static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
 {
        struct pool_info *pi = data;
        struct page *page;
@@ -47,7 +47,7 @@
 
 static void unplug_slaves(mddev_t *mddev);
 
-static void * r10bio_pool_alloc(unsigned int __nocast gfp_flags, void *data)
+static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
 {
        conf_t *conf = data;
        r10bio_t *r10_bio;
@@ -81,7 +81,7 @@ static void r10bio_pool_free(void *r10_bio, void *data)
  * one for write (we recover only one drive per r10buf)
  *
  */
-static void * r10buf_pool_alloc(unsigned int __nocast gfp_flags, void *data)
+static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
 {
        conf_t *conf = data;
        struct page *page;
@@ -1290,7 +1290,7 @@ static void bond_mc_list_destroy(struct bonding *bond)
  * Copy all the Multicast addresses from src to the bonding device dst
  */
 static int bond_mc_list_copy(struct dev_mc_list *mc_list, struct bonding *bond,
-                unsigned int __nocast gfp_flag)
+                gfp_t gfp_flag)
 {
        struct dev_mc_list *dmi, *new_dmi;
 
@@ -584,7 +584,7 @@ static inline int ns83820_add_rx_skb(struct ns83820 *dev, struct sk_buff *skb)
        return 0;
 }
 
-static inline int rx_refill(struct net_device *ndev, unsigned int __nocast gfp)
+static inline int rx_refill(struct net_device *ndev, gfp_t gfp)
 {
        struct ns83820 *dev = PRIV(ndev);
        unsigned i;
@@ -1036,7 +1036,7 @@ struct gem {
 #define ALIGNED_RX_SKB_ADDR(addr) \
        ((((unsigned long)(addr) + (64UL - 1UL)) & ~(64UL - 1UL)) - (unsigned long)(addr))
 static __inline__ struct sk_buff *gem_alloc_skb(int size,
-                unsigned int __nocast gfp_flags)
+                gfp_t gfp_flags)
 {
        struct sk_buff *skb = alloc_skb(size + 64, gfp_flags);
 
@@ -833,7 +833,7 @@ zfcp_unit_dequeue(struct zfcp_unit *unit)
 }
 
 static void *
-zfcp_mempool_alloc(unsigned int __nocast gfp_mask, void *size)
+zfcp_mempool_alloc(gfp_t gfp_mask, void *size)
 {
        return kmalloc((size_t) size, gfp_mask);
 }
fs/bio.c
@@ -75,7 +75,7 @@ struct bio_set {
  */
 static struct bio_set *fs_bio_set;
 
-static inline struct bio_vec *bvec_alloc_bs(unsigned int __nocast gfp_mask, int nr, unsigned long *idx, struct bio_set *bs)
+static inline struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx, struct bio_set *bs)
 {
        struct bio_vec *bvl;
        struct biovec_slab *bp;
@@ -155,7 +155,7 @@ inline void bio_init(struct bio *bio)
  * allocate bio and iovecs from the memory pools specified by the
  * bio_set structure.
  **/
-struct bio *bio_alloc_bioset(unsigned int __nocast gfp_mask, int nr_iovecs, struct bio_set *bs)
+struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
 {
        struct bio *bio = mempool_alloc(bs->bio_pool, gfp_mask);
 
@@ -181,7 +181,7 @@ struct bio *bio_alloc_bioset(unsigned int __nocast gfp_mask, int nr_iovecs, stru
        return bio;
 }
 
-struct bio *bio_alloc(unsigned int __nocast gfp_mask, int nr_iovecs)
+struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs)
 {
        struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
 
@@ -277,7 +277,7 @@ inline void __bio_clone(struct bio *bio, struct bio *bio_src)
  *
  * Like __bio_clone, only also allocates the returned bio
  */
-struct bio *bio_clone(struct bio *bio, unsigned int __nocast gfp_mask)
+struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
 {
        struct bio *b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, fs_bio_set);
 
@@ -1078,7 +1078,7 @@ struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors)
        return bp;
 }
 
-static void *bio_pair_alloc(unsigned int __nocast gfp_flags, void *data)
+static void *bio_pair_alloc(gfp_t gfp_flags, void *data)
 {
        return kmalloc(sizeof(struct bio_pair), gfp_flags);
 }
@@ -3045,7 +3045,7 @@ static void recalc_bh_state(void)
        buffer_heads_over_limit = (tot > max_buffer_heads);
 }
 
-struct buffer_head *alloc_buffer_head(unsigned int __nocast gfp_flags)
+struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
 {
        struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
        if (ret) {
@@ -102,7 +102,7 @@ static struct bio *mpage_bio_submit(int rw, struct bio *bio)
 static struct bio *
 mpage_alloc(struct block_device *bdev,
                sector_t first_sector, int nr_vecs,
-                unsigned int __nocast gfp_flags)
+                gfp_t gfp_flags)
 {
        struct bio *bio;
 
@@ -40,7 +40,7 @@
  * Depending on @gfp_mask the allocation may be guaranteed to succeed.
  */
 static inline void *__ntfs_malloc(unsigned long size,
-                unsigned int __nocast gfp_mask)
+                gfp_t gfp_mask)
 {
        if (likely(size <= PAGE_SIZE)) {
                BUG_ON(!size);
@@ -35,7 +35,7 @@ EXPORT_SYMBOL(posix_acl_permission);
  * Allocate a new ACL with the specified number of entries.
  */
 struct posix_acl *
-posix_acl_alloc(int count, unsigned int __nocast flags)
+posix_acl_alloc(int count, gfp_t flags)
 {
        const size_t size = sizeof(struct posix_acl) +
                        count * sizeof(struct posix_acl_entry);
@@ -51,7 +51,7 @@ posix_acl_alloc(int count, unsigned int __nocast flags)
  * Clone an ACL.
  */
 struct posix_acl *
-posix_acl_clone(const struct posix_acl *acl, unsigned int __nocast flags)
+posix_acl_clone(const struct posix_acl *acl, gfp_t flags)
 {
        struct posix_acl *clone = NULL;
 
@@ -185,7 +185,7 @@ posix_acl_equiv_mode(const struct posix_acl *acl, mode_t *mode_p)
  * Create an ACL representing the file mode permission bits of an inode.
  */
 struct posix_acl *
-posix_acl_from_mode(mode_t mode, unsigned int __nocast flags)
+posix_acl_from_mode(mode_t mode, gfp_t flags)
 {
        struct posix_acl *acl = posix_acl_alloc(3, flags);
        if (!acl)
@@ -45,7 +45,7 @@
 
 
 void *
-kmem_alloc(size_t size, unsigned int __nocast flags)
+kmem_alloc(size_t size, gfp_t flags)
 {
        int retries = 0;
        unsigned int lflags = kmem_flags_convert(flags);
@@ -67,7 +67,7 @@ kmem_alloc(size_t size, unsigned int __nocast flags)
 }
 
 void *
-kmem_zalloc(size_t size, unsigned int __nocast flags)
+kmem_zalloc(size_t size, gfp_t flags)
 {
        void *ptr;
 
@@ -90,7 +90,7 @@ kmem_free(void *ptr, size_t size)
 
 void *
 kmem_realloc(void *ptr, size_t newsize, size_t oldsize,
-                unsigned int __nocast flags)
+                gfp_t flags)
 {
        void *new;
 
@@ -105,7 +105,7 @@ kmem_realloc(void *ptr, size_t newsize, size_t oldsize,
 }
 
 void *
-kmem_zone_alloc(kmem_zone_t *zone, unsigned int __nocast flags)
+kmem_zone_alloc(kmem_zone_t *zone, gfp_t flags)
 {
        int retries = 0;
        unsigned int lflags = kmem_flags_convert(flags);
@@ -124,7 +124,7 @@ kmem_zone_alloc(kmem_zone_t *zone, unsigned int __nocast flags)
 }
 
 void *
-kmem_zone_zalloc(kmem_zone_t *zone, unsigned int __nocast flags)
+kmem_zone_zalloc(kmem_zone_t *zone, gfp_t flags)
 {
        void *ptr;
 
|
|
@ -81,7 +81,7 @@ typedef unsigned long xfs_pflags_t;
|
|||
*(NSTATEP) = *(OSTATEP); \
|
||||
} while (0)
|
||||
|
||||
static __inline unsigned int kmem_flags_convert(unsigned int __nocast flags)
|
||||
static __inline unsigned int kmem_flags_convert(gfp_t flags)
|
||||
{
|
||||
unsigned int lflags = __GFP_NOWARN; /* we'll report problems, if need be */
|
||||
|
||||
|
@ -125,13 +125,12 @@ kmem_zone_destroy(kmem_zone_t *zone)
|
|||
BUG();
|
||||
}
|
||||
|
||||
extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast);
|
||||
extern void *kmem_zone_alloc(kmem_zone_t *, unsigned int __nocast);
|
||||
extern void *kmem_zone_zalloc(kmem_zone_t *, gfp_t);
|
||||
extern void *kmem_zone_alloc(kmem_zone_t *, gfp_t);
|
||||
|
||||
extern void *kmem_alloc(size_t, unsigned int __nocast);
|
||||
extern void *kmem_realloc(void *, size_t, size_t,
|
||||
unsigned int __nocast);
|
||||
extern void *kmem_zalloc(size_t, unsigned int __nocast);
|
||||
extern void *kmem_alloc(size_t, gfp_t);
|
||||
extern void *kmem_realloc(void *, size_t, size_t, gfp_t);
|
||||
extern void *kmem_zalloc(size_t, gfp_t);
|
||||
extern void kmem_free(void *, size_t);
|
||||
|
||||
typedef struct shrinker *kmem_shaker_t;
|
||||
|
|
|
@ -35,7 +35,7 @@ dma_set_mask(struct device *dev, u64 dma_mask)
|
|||
|
||||
static inline void *
|
||||
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
|
||||
unsigned int __nocast flag)
|
||||
gfp_t flag)
|
||||
{
|
||||
BUG_ON(dev->bus != &pci_bus_type);
|
||||
|
||||
|
@ -168,7 +168,7 @@ dma_set_mask(struct device *dev, u64 dma_mask)
|
|||
|
||||
static inline void *
|
||||
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
|
||||
unsigned int __nocast flag)
|
||||
gfp_t flag)
|
||||
{
|
||||
BUG();
|
||||
return NULL;
|
||||
|
|
|
@ -11,7 +11,7 @@
|
|||
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
|
||||
|
||||
void *dma_alloc_coherent(struct device *dev, size_t size,
|
||||
dma_addr_t *dma_handle, unsigned int __nocast flag);
|
||||
dma_addr_t *dma_handle, gfp_t flag);
|
||||
|
||||
void dma_free_coherent(struct device *dev, size_t size,
|
||||
void *vaddr, dma_addr_t dma_handle);
|
||||
|
|
|
@ -61,7 +61,7 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
|
|||
|
||||
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
|
||||
dma_addr_t * dma_handle,
|
||||
unsigned int __nocast gfp)
|
||||
gfp_t gfp)
|
||||
{
|
||||
#ifdef CONFIG_NOT_COHERENT_CACHE
|
||||
return __dma_alloc_coherent(size, dma_handle, gfp);
|
||||
|
|
|
@ -19,7 +19,7 @@
|
|||
extern int dma_supported(struct device *dev, u64 mask);
|
||||
extern int dma_set_mask(struct device *dev, u64 dma_mask);
|
||||
extern void *dma_alloc_coherent(struct device *dev, size_t size,
|
||||
dma_addr_t *dma_handle, unsigned int __nocast flag);
|
||||
dma_addr_t *dma_handle, gfp_t flag);
|
||||
extern void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
|
||||
dma_addr_t dma_handle);
|
||||
extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
|
||||
|
@ -118,7 +118,7 @@ dma_cache_sync(void *vaddr, size_t size,
|
|||
*/
|
||||
struct dma_mapping_ops {
|
||||
void * (*alloc_coherent)(struct device *dev, size_t size,
|
||||
dma_addr_t *dma_handle, unsigned int __nocast flag);
|
||||
dma_addr_t *dma_handle, gfp_t flag);
|
||||
void (*free_coherent)(struct device *dev, size_t size,
|
||||
void *vaddr, dma_addr_t dma_handle);
|
||||
dma_addr_t (*map_single)(struct device *dev, void *ptr,
|
||||
|
|
|
@ -122,7 +122,7 @@ extern void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
|
|||
int nelems, enum dma_data_direction direction);
|
||||
|
||||
extern void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
|
||||
dma_addr_t *dma_handle, unsigned int __nocast flag);
|
||||
dma_addr_t *dma_handle, gfp_t flag);
|
||||
extern void iommu_free_coherent(struct iommu_table *tbl, size_t size,
|
||||
void *vaddr, dma_addr_t dma_handle);
|
||||
extern dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
|
||||
|
|
|
@ -467,7 +467,7 @@ static inline void atm_dev_put(struct atm_dev *dev)
|
|||
|
||||
int atm_charge(struct atm_vcc *vcc,int truesize);
|
||||
struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size,
|
||||
unsigned int __nocast gfp_flags);
|
||||
gfp_t gfp_flags);
|
||||
int atm_pcr_goal(struct atm_trafprm *tp);
|
||||
|
||||
void vcc_release_async(struct atm_vcc *vcc, int reply);
|
||||
|
|
|
@@ -276,8 +276,8 @@ extern void bio_pair_release(struct bio_pair *dbio);
 extern struct bio_set *bioset_create(int, int, int);
 extern void bioset_free(struct bio_set *);
 
-extern struct bio *bio_alloc(unsigned int __nocast, int);
-extern struct bio *bio_alloc_bioset(unsigned int __nocast, int, struct bio_set *);
+extern struct bio *bio_alloc(gfp_t, int);
+extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
 extern void bio_put(struct bio *);
 extern void bio_free(struct bio *, struct bio_set *);
 
@@ -287,7 +287,7 @@ extern int bio_phys_segments(struct request_queue *, struct bio *);
 extern int bio_hw_segments(struct request_queue *, struct bio *);
 
 extern void __bio_clone(struct bio *, struct bio *);
-extern struct bio *bio_clone(struct bio *, unsigned int __nocast);
+extern struct bio *bio_clone(struct bio *, gfp_t);
 
 extern void bio_init(struct bio *);
 
@@ -172,7 +172,7 @@ void __brelse(struct buffer_head *);
 void __bforget(struct buffer_head *);
 void __breadahead(struct block_device *, sector_t block, int size);
 struct buffer_head *__bread(struct block_device *, sector_t block, int size);
-struct buffer_head *alloc_buffer_head(unsigned int __nocast gfp_flags);
+struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
 void free_buffer_head(struct buffer_head * bh);
 void FASTCALL(unlock_buffer(struct buffer_head *bh));
 void FASTCALL(__lock_buffer(struct buffer_head *bh));
@@ -149,7 +149,7 @@ struct cn_dev {
 
 int cn_add_callback(struct cb_id *, char *, void (*callback) (void *));
 void cn_del_callback(struct cb_id *);
-int cn_netlink_send(struct cn_msg *, u32, unsigned int __nocast);
+int cn_netlink_send(struct cn_msg *, u32, gfp_t);
 
 int cn_queue_add_callback(struct cn_queue_dev *dev, char *name, struct cb_id *id, void (*callback)(void *));
 void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id);
@@ -23,7 +23,7 @@ void cpuset_init_current_mems_allowed(void);
 void cpuset_update_current_mems_allowed(void);
 void cpuset_restrict_to_mems_allowed(unsigned long *nodes);
 int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl);
-extern int cpuset_zone_allowed(struct zone *z, unsigned int __nocast gfp_mask);
+extern int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask);
 extern int cpuset_excl_nodes_overlap(const struct task_struct *p);
 extern struct file_operations proc_cpuset_operations;
 extern char *cpuset_task_status_allowed(struct task_struct *task, char *buffer);
@@ -49,8 +49,7 @@ static inline int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl)
        return 1;
 }
 
-static inline int cpuset_zone_allowed(struct zone *z,
-                unsigned int __nocast gfp_mask)
+static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
 {
        return 1;
 }
@@ -19,7 +19,7 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
 
 void dma_pool_destroy(struct dma_pool *pool);
 
-void *dma_pool_alloc(struct dma_pool *pool, unsigned int __nocast mem_flags,
+void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
                dma_addr_t *handle);
 
 void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr);
@@ -85,9 +85,9 @@ static inline void arch_free_page(struct page *page, int order) { }
 #endif
 
 extern struct page *
-FASTCALL(__alloc_pages(unsigned int, unsigned int, struct zonelist *));
+FASTCALL(__alloc_pages(gfp_t, unsigned int, struct zonelist *));
 
-static inline struct page *alloc_pages_node(int nid, unsigned int __nocast gfp_mask,
+static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
                unsigned int order)
 {
        if (unlikely(order >= MAX_ORDER))
@@ -98,17 +98,17 @@ static inline struct page *alloc_pages_node(int nid, unsigned int __nocast gfp_m
 }
 
 #ifdef CONFIG_NUMA
-extern struct page *alloc_pages_current(unsigned int __nocast gfp_mask, unsigned order);
+extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);
 
 static inline struct page *
-alloc_pages(unsigned int __nocast gfp_mask, unsigned int order)
+alloc_pages(gfp_t gfp_mask, unsigned int order)
 {
        if (unlikely(order >= MAX_ORDER))
                return NULL;
 
        return alloc_pages_current(gfp_mask, order);
 }
-extern struct page *alloc_page_vma(unsigned __nocast gfp_mask,
+extern struct page *alloc_page_vma(gfp_t gfp_mask,
                struct vm_area_struct *vma, unsigned long addr);
 #else
 #define alloc_pages(gfp_mask, order) \
@@ -117,8 +117,8 @@ extern struct page *alloc_page_vma(unsigned __nocast gfp_mask,
 #endif
 #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
 
-extern unsigned long FASTCALL(__get_free_pages(unsigned int __nocast gfp_mask, unsigned int order));
-extern unsigned long FASTCALL(get_zeroed_page(unsigned int __nocast gfp_mask));
+extern unsigned long FASTCALL(__get_free_pages(gfp_t gfp_mask, unsigned int order));
+extern unsigned long FASTCALL(get_zeroed_page(gfp_t gfp_mask));
 
 #define __get_free_page(gfp_mask) \
                __get_free_pages((gfp_mask),0)
@@ -935,7 +935,7 @@ void journal_put_journal_head(struct journal_head *jh);
  */
 extern kmem_cache_t *jbd_handle_cache;
 
-static inline handle_t *jbd_alloc_handle(unsigned int __nocast gfp_flags)
+static inline handle_t *jbd_alloc_handle(gfp_t gfp_flags)
 {
        return kmem_cache_alloc(jbd_handle_cache, gfp_flags);
 }
@@ -35,8 +35,8 @@ struct kfifo {
 };
 
 extern struct kfifo *kfifo_init(unsigned char *buffer, unsigned int size,
-                unsigned int __nocast gfp_mask, spinlock_t *lock);
-extern struct kfifo *kfifo_alloc(unsigned int size, unsigned int __nocast gfp_mask,
+                gfp_t gfp_mask, spinlock_t *lock);
+extern struct kfifo *kfifo_alloc(unsigned int size, gfp_t gfp_mask,
                spinlock_t *lock);
 extern void kfifo_free(struct kfifo *fifo);
 extern unsigned int __kfifo_put(struct kfifo *fifo,
@@ -6,7 +6,7 @@
 
 #include <linux/wait.h>
 
-typedef void * (mempool_alloc_t)(unsigned int __nocast gfp_mask, void *pool_data);
+typedef void * (mempool_alloc_t)(gfp_t gfp_mask, void *pool_data);
 typedef void (mempool_free_t)(void *element, void *pool_data);
 
 typedef struct mempool_s {
@@ -26,17 +26,16 @@ extern mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
 extern mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
                mempool_free_t *free_fn, void *pool_data, int nid);
 
-extern int mempool_resize(mempool_t *pool, int new_min_nr,
-                unsigned int __nocast gfp_mask);
+extern int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask);
 extern void mempool_destroy(mempool_t *pool);
-extern void * mempool_alloc(mempool_t *pool, unsigned int __nocast gfp_mask);
+extern void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask);
 extern void mempool_free(void *element, mempool_t *pool);
 
 /*
  * A mempool_alloc_t and mempool_free_t that get the memory from
  * a slab that is passed in through pool_data.
  */
-void *mempool_alloc_slab(unsigned int __nocast gfp_mask, void *pool_data);
+void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data);
 void mempool_free_slab(void *element, void *pool_data);
 
 #endif /* _LINUX_MEMPOOL_H */
@@ -131,7 +131,7 @@ extern struct sock *netlink_kernel_create(int unit, unsigned int groups, void (*
 extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err);
 extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 pid, int nonblock);
 extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 pid,
-                __u32 group, unsigned int __nocast allocation);
+                __u32 group, gfp_t allocation);
 extern void netlink_set_err(struct sock *ssk, __u32 pid, __u32 group, int code);
 extern int netlink_register_notifier(struct notifier_block *nb);
 extern int netlink_unregister_notifier(struct notifier_block *nb);
@@ -19,7 +19,7 @@
 #define AS_EIO         (__GFP_BITS_SHIFT + 0)  /* IO error on async write */
 #define AS_ENOSPC      (__GFP_BITS_SHIFT + 1)  /* ENOSPC on async write */
 
-static inline unsigned int __nocast mapping_gfp_mask(struct address_space * mapping)
+static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
 {
        return mapping->flags & __GFP_BITS_MASK;
 }
@@ -71,11 +71,11 @@ posix_acl_release(struct posix_acl *acl)
 
 /* posix_acl.c */
 
-extern struct posix_acl *posix_acl_alloc(int, unsigned int __nocast);
-extern struct posix_acl *posix_acl_clone(const struct posix_acl *, unsigned int __nocast);
+extern struct posix_acl *posix_acl_alloc(int, gfp_t);
+extern struct posix_acl *posix_acl_clone(const struct posix_acl *, gfp_t);
 extern int posix_acl_valid(const struct posix_acl *);
 extern int posix_acl_permission(struct inode *, const struct posix_acl *, int);
-extern struct posix_acl *posix_acl_from_mode(mode_t, unsigned int __nocast);
+extern struct posix_acl *posix_acl_from_mode(mode_t, gfp_t);
 extern int posix_acl_equiv_mode(const struct posix_acl *, mode_t *);
 extern int posix_acl_create_masq(struct posix_acl *, mode_t *);
 extern int posix_acl_chmod_masq(struct posix_acl *, mode_t);
@@ -50,7 +50,7 @@ void *radix_tree_delete(struct radix_tree_root *, unsigned long);
 unsigned int
 radix_tree_gang_lookup(struct radix_tree_root *root, void **results,
                unsigned long first_index, unsigned int max_items);
-int radix_tree_preload(unsigned int __nocast gfp_mask);
+int radix_tree_preload(gfp_t gfp_mask);
 void radix_tree_init(void);
 void *radix_tree_tag_set(struct radix_tree_root *root,
                unsigned long index, int tag);
@@ -2634,8 +2634,7 @@ static inline int security_socket_getpeersec(struct socket *sock, char __user *o
        return security_ops->socket_getpeersec(sock, optval, optlen, len);
 }
 
-static inline int security_sk_alloc(struct sock *sk, int family,
-                unsigned int __nocast priority)
+static inline int security_sk_alloc(struct sock *sk, int family, gfp_t priority)
 {
        return security_ops->sk_alloc_security(sk, family, priority);
 }
@@ -2752,8 +2751,7 @@ static inline int security_socket_getpeersec(struct socket *sock, char __user *o
        return -ENOPROTOOPT;
 }
 
-static inline int security_sk_alloc(struct sock *sk, int family,
-                unsigned int __nocast priority)
+static inline int security_sk_alloc(struct sock *sk, int family, gfp_t priority)
 {
        return 0;
 }
@@ -302,37 +302,37 @@ struct sk_buff {
 
 extern void __kfree_skb(struct sk_buff *skb);
 extern struct sk_buff *__alloc_skb(unsigned int size,
-                unsigned int __nocast priority, int fclone);
+                gfp_t priority, int fclone);
 static inline struct sk_buff *alloc_skb(unsigned int size,
-                unsigned int __nocast priority)
+                gfp_t priority)
 {
        return __alloc_skb(size, priority, 0);
 }
 
 static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
-                unsigned int __nocast priority)
+                gfp_t priority)
 {
        return __alloc_skb(size, priority, 1);
 }
 
 extern struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp,
                unsigned int size,
-                unsigned int __nocast priority);
+                gfp_t priority);
 extern void kfree_skbmem(struct sk_buff *skb);
 extern struct sk_buff *skb_clone(struct sk_buff *skb,
-                unsigned int __nocast priority);
+                gfp_t priority);
 extern struct sk_buff *skb_copy(const struct sk_buff *skb,
-                unsigned int __nocast priority);
+                gfp_t priority);
 extern struct sk_buff *pskb_copy(struct sk_buff *skb,
-                unsigned int __nocast gfp_mask);
+                gfp_t gfp_mask);
 extern int pskb_expand_head(struct sk_buff *skb,
                int nhead, int ntail,
-                unsigned int __nocast gfp_mask);
+                gfp_t gfp_mask);
 extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
                unsigned int headroom);
 extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
                int newheadroom, int newtailroom,
-                unsigned int __nocast priority);
+                gfp_t priority);
 extern struct sk_buff * skb_pad(struct sk_buff *skb, int pad);
 #define dev_kfree_skb(a) kfree_skb(a)
 extern void skb_over_panic(struct sk_buff *skb, int len,
@@ -484,7 +484,7 @@ static inline int skb_shared(const struct sk_buff *skb)
  * NULL is returned on a memory allocation failure.
  */
 static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
-                unsigned int __nocast pri)
+                gfp_t pri)
 {
        might_sleep_if(pri & __GFP_WAIT);
        if (skb_shared(skb)) {
@@ -516,7 +516,7 @@ static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
  * %NULL is returned on a memory allocation failure.
  */
 static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
-                unsigned int __nocast pri)
+                gfp_t pri)
 {
        might_sleep_if(pri & __GFP_WAIT);
        if (skb_cloned(skb)) {
@@ -1017,7 +1017,7 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
  * %NULL is returned in there is no free memory.
  */
 static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
-                unsigned int __nocast gfp_mask)
+                gfp_t gfp_mask)
 {
        struct sk_buff *skb = alloc_skb(length + 16, gfp_mask);
        if (likely(skb))
@@ -1130,8 +1130,8 @@ static inline int skb_can_coalesce(struct sk_buff *skb, int i,
  * If there is no free memory -ENOMEM is returned, otherwise zero
  * is returned and the old skb data released.
  */
-extern int __skb_linearize(struct sk_buff *skb, unsigned int __nocast gfp);
-static inline int skb_linearize(struct sk_buff *skb, unsigned int __nocast gfp)
+extern int __skb_linearize(struct sk_buff *skb, gfp_t gfp);
+static inline int skb_linearize(struct sk_buff *skb, gfp_t gfp)
 {
        return __skb_linearize(skb, gfp);
 }
@@ -61,11 +61,11 @@ extern kmem_cache_t *kmem_cache_create(const char *, size_t, size_t, unsigned lo
                void (*)(void *, kmem_cache_t *, unsigned long));
 extern int kmem_cache_destroy(kmem_cache_t *);
 extern int kmem_cache_shrink(kmem_cache_t *);
-extern void *kmem_cache_alloc(kmem_cache_t *, unsigned int __nocast);
+extern void *kmem_cache_alloc(kmem_cache_t *, gfp_t);
 extern void kmem_cache_free(kmem_cache_t *, void *);
 extern unsigned int kmem_cache_size(kmem_cache_t *);
 extern const char *kmem_cache_name(kmem_cache_t *);
-extern kmem_cache_t *kmem_find_general_cachep(size_t size, unsigned int __nocast gfpflags);
+extern kmem_cache_t *kmem_find_general_cachep(size_t size, gfp_t gfpflags);
 
 /* Size description struct for general caches. */
 struct cache_sizes {
@@ -74,9 +74,9 @@ struct cache_sizes {
        kmem_cache_t *cs_dmacachep;
 };
 extern struct cache_sizes malloc_sizes[];
-extern void *__kmalloc(size_t, unsigned int __nocast);
+extern void *__kmalloc(size_t, gfp_t);
 
-static inline void *kmalloc(size_t size, unsigned int __nocast flags)
+static inline void *kmalloc(size_t size, gfp_t flags)
 {
        if (__builtin_constant_p(size)) {
                int i = 0;
@@ -99,7 +99,7 @@ static inline void *kmalloc(size_t size, unsigned int __nocast flags)
        return __kmalloc(size, flags);
 }
 
-extern void *kzalloc(size_t, unsigned int __nocast);
+extern void *kzalloc(size_t, gfp_t);
 
 /**
  * kcalloc - allocate memory for an array. The memory is set to zero.
@@ -107,7 +107,7 @@ extern void *kzalloc(size_t, unsigned int __nocast);
  * @size: element size.
  * @flags: the type of memory to allocate.
 */
-static inline void *kcalloc(size_t n, size_t size, unsigned int __nocast flags)
+static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
 {
        if (n != 0 && size > INT_MAX / n)
                return NULL;
@@ -118,15 +118,14 @@ extern void kfree(const void *);
 extern unsigned int ksize(const void *);
 
 #ifdef CONFIG_NUMA
-extern void *kmem_cache_alloc_node(kmem_cache_t *,
-                unsigned int __nocast flags, int node);
-extern void *kmalloc_node(size_t size, unsigned int __nocast flags, int node);
+extern void *kmem_cache_alloc_node(kmem_cache_t *, gfp_t flags, int node);
+extern void *kmalloc_node(size_t size, gfp_t flags, int node);
 #else
 static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, int flags, int node)
 {
        return kmem_cache_alloc(cachep, flags);
 }
-static inline void *kmalloc_node(size_t size, unsigned int __nocast flags, int node)
+static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
 {
        return kmalloc(size, flags);
 }
@@ -88,7 +88,7 @@ extern int memcmp(const void *,const void *,__kernel_size_t);
 extern void * memchr(const void *,int,__kernel_size_t);
 #endif
 
-extern char *kstrdup(const char *s, unsigned int __nocast gfp);
+extern char *kstrdup(const char *s, gfp_t gfp);
 
 #ifdef __cplusplus
 }
@@ -147,7 +147,7 @@ struct swap_list_t {
 #define vm_swap_full() (nr_swap_pages*2 < total_swap_pages)
 
 /* linux/mm/oom_kill.c */
-extern void out_of_memory(unsigned int __nocast gfp_mask, int order);
+extern void out_of_memory(gfp_t gfp_mask, int order);
 
 /* linux/mm/memory.c */
 extern void swapin_readahead(swp_entry_t, unsigned long, struct vm_area_struct *);
@@ -159,7 +159,7 @@ extern unsigned int textsearch_find_continuous(struct ts_config *,
 #define TS_PRIV_ALIGN(len) (((len) + TS_PRIV_ALIGNTO-1) & ~(TS_PRIV_ALIGNTO-1))
 
 static inline struct ts_config *alloc_ts_config(size_t payload,
-                unsigned int __nocast gfp_mask)
+                gfp_t gfp_mask)
 {
        struct ts_config *conf;
 
@@ -165,6 +165,10 @@ typedef __u64 __bitwise __le64;
 typedef __u64 __bitwise __be64;
 #endif
 
+#ifdef __KERNEL__
+typedef unsigned __nocast gfp_t;
+#endif
+
 struct ustat {
        __kernel_daddr_t f_tfree;
        __kernel_ino_t f_tinode;
@@ -34,8 +34,8 @@ struct vm_struct {
 extern void *vmalloc(unsigned long size);
 extern void *vmalloc_exec(unsigned long size);
 extern void *vmalloc_32(unsigned long size);
-extern void *__vmalloc(unsigned long size, unsigned int __nocast gfp_mask, pgprot_t prot);
-extern void *__vmalloc_area(struct vm_struct *area, unsigned int __nocast gfp_mask, pgprot_t prot);
+extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
+extern void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot);
 extern void vfree(void *addr);
 
 extern void *vmap(struct page **pages, unsigned int count,
@@ -136,7 +136,7 @@ struct bt_skb_cb {
 };
 #define bt_cb(skb) ((struct bt_skb_cb *)(skb->cb))
 
-static inline struct sk_buff *bt_skb_alloc(unsigned int len, unsigned int __nocast how)
+static inline struct sk_buff *bt_skb_alloc(unsigned int len, gfp_t how)
 {
        struct sk_buff *skb;
 
@@ -230,7 +230,7 @@ int rfcomm_send_rpn(struct rfcomm_session *s, int cr, u8 dlci,
                u8 xon_char, u8 xoff_char, u16 param_mask);
 
 /* ---- RFCOMM DLCs (channels) ---- */
-struct rfcomm_dlc *rfcomm_dlc_alloc(unsigned int __nocast prio);
+struct rfcomm_dlc *rfcomm_dlc_alloc(gfp_t prio);
 void rfcomm_dlc_free(struct rfcomm_dlc *d);
 int rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst, u8 channel);
 int rfcomm_dlc_close(struct rfcomm_dlc *d, int reason);
@@ -19,9 +19,9 @@ extern void dn_nsp_send_data_ack(struct sock *sk);
 extern void dn_nsp_send_oth_ack(struct sock *sk);
 extern void dn_nsp_delayed_ack(struct sock *sk);
 extern void dn_send_conn_ack(struct sock *sk);
-extern void dn_send_conn_conf(struct sock *sk, unsigned int __nocast gfp);
+extern void dn_send_conn_conf(struct sock *sk, gfp_t gfp);
 extern void dn_nsp_send_disc(struct sock *sk, unsigned char type,
-                unsigned short reason, unsigned int __nocast gfp);
+                unsigned short reason, gfp_t gfp);
 extern void dn_nsp_return_disc(struct sk_buff *skb, unsigned char type,
                unsigned short reason);
 extern void dn_nsp_send_link(struct sock *sk, unsigned char lsflags, char fcval);
@@ -29,14 +29,14 @@ extern void dn_nsp_send_conninit(struct sock *sk, unsigned char flags);
 
 extern void dn_nsp_output(struct sock *sk);
 extern int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff_head *q, unsigned short acknum);
-extern void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, unsigned int __nocast gfp, int oob);
+extern void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, gfp_t gfp, int oob);
 extern unsigned long dn_nsp_persist(struct sock *sk);
 extern int dn_nsp_xmit_timeout(struct sock *sk);
 
 extern int dn_nsp_rx(struct sk_buff *);
 extern int dn_nsp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
 
-extern struct sk_buff *dn_alloc_skb(struct sock *sk, int size, unsigned int __nocast pri);
+extern struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri);
 extern struct sk_buff *dn_alloc_send_skb(struct sock *sk, size_t *size, int noblock, long timeo, int *err);
 
 #define NSP_REASON_OK 0                /* No error */
@@ -15,7 +15,7 @@
    GNU General Public License for more details.
 *******************************************************************************/
 
-extern struct sk_buff *dn_alloc_skb(struct sock *sk, int size, unsigned int __nocast pri);
+extern struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri);
 extern int dn_route_output_sock(struct dst_entry **pprt, struct flowi *, struct sock *sk, int flags);
 extern int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb);
 extern int dn_cache_getroute(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
@@ -94,7 +94,7 @@ static inline void *inet_csk_ca(const struct sock *sk)
 
 extern struct sock *inet_csk_clone(struct sock *sk,
                const struct request_sock *req,
-                const unsigned int __nocast priority);
+                const gfp_t priority);
 
 enum inet_csk_ack_state_t {
        ICSK_ACK_SCHED = 1,
@@ -832,7 +832,7 @@ extern void ip_vs_app_inc_put(struct ip_vs_app *inc);
 
 extern int ip_vs_app_pkt_out(struct ip_vs_conn *, struct sk_buff **pskb);
 extern int ip_vs_app_pkt_in(struct ip_vs_conn *, struct sk_buff **pskb);
-extern int ip_vs_skb_replace(struct sk_buff *skb, unsigned int __nocast pri,
+extern int ip_vs_skb_replace(struct sk_buff *skb, gfp_t pri,
                char *o_buf, int o_len, char *n_buf, int n_len);
 extern int ip_vs_app_init(void);
 extern void ip_vs_app_cleanup(void);
@@ -93,7 +93,7 @@ static __inline__ char llc_backlog_type(struct sk_buff *skb)
        return skb->cb[sizeof(skb->cb) - 1];
 }
 
-extern struct sock *llc_sk_alloc(int family, unsigned int __nocast priority,
+extern struct sock *llc_sk_alloc(int family, gfp_t priority,
                struct proto *prot);
 extern void llc_sk_free(struct sock *sk);
 
@@ -125,7 +125,7 @@
  */
 extern struct sock *sctp_get_ctl_sock(void);
 extern int sctp_copy_local_addr_list(struct sctp_bind_addr *,
-                sctp_scope_t, unsigned int __nocast gfp,
+                sctp_scope_t, gfp_t gfp,
                int flags);
 extern struct sctp_pf *sctp_get_pf_specific(sa_family_t family);
 extern int sctp_register_pf(struct sctp_pf *, sa_family_t);
@@ -181,17 +181,17 @@ const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t,
 int sctp_chunk_iif(const struct sctp_chunk *);
 struct sctp_association *sctp_make_temp_asoc(const struct sctp_endpoint *,
                struct sctp_chunk *,
-                unsigned int __nocast gfp);
+                gfp_t gfp);
 __u32 sctp_generate_verification_tag(void);
 void sctp_populate_tie_tags(__u8 *cookie, __u32 curTag, __u32 hisTag);
 
 /* Prototypes for chunk-building functions. */
 struct sctp_chunk *sctp_make_init(const struct sctp_association *,
                const struct sctp_bind_addr *,
-                unsigned int __nocast gfp, int vparam_len);
+                gfp_t gfp, int vparam_len);
 struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *,
                const struct sctp_chunk *,
-                const unsigned int __nocast gfp,
+                const gfp_t gfp,
                const int unkparam_len);
 struct sctp_chunk *sctp_make_cookie_echo(const struct sctp_association *,
                const struct sctp_chunk *);
@@ -265,7 +265,7 @@ int sctp_do_sm(sctp_event_t event_type, sctp_subtype_t subtype,
                struct sctp_endpoint *,
                struct sctp_association *asoc,
                void *event_arg,
-                unsigned int __nocast gfp);
+                gfp_t gfp);
 
 /* 2nd level prototypes */
 void sctp_generate_t3_rtx_event(unsigned long peer);
@@ -276,7 +276,7 @@ void sctp_ootb_pkt_free(struct sctp_packet *);
 struct sctp_association *sctp_unpack_cookie(const struct sctp_endpoint *,
                const struct sctp_association *,
                struct sctp_chunk *,
-                unsigned int __nocast gfp, int *err,
+                gfp_t gfp, int *err,
                struct sctp_chunk **err_chk_p);
 int sctp_addip_addr_config(struct sctp_association *, sctp_param_t,
                struct sockaddr_storage*, int);
@@ -446,7 +446,7 @@ struct sctp_ssnmap {
 };
 
 struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out,
-                unsigned int __nocast gfp);
+                gfp_t gfp);
 void sctp_ssnmap_free(struct sctp_ssnmap *map);
 void sctp_ssnmap_clear(struct sctp_ssnmap *map);
 
@@ -947,7 +947,7 @@ struct sctp_transport {
 };
 
 struct sctp_transport *sctp_transport_new(const union sctp_addr *,
-                unsigned int __nocast);
+                gfp_t);
 void sctp_transport_set_owner(struct sctp_transport *,
                struct sctp_association *);
 void sctp_transport_route(struct sctp_transport *, union sctp_addr *,
@@ -1095,10 +1095,10 @@ void sctp_bind_addr_init(struct sctp_bind_addr *, __u16 port);
 void sctp_bind_addr_free(struct sctp_bind_addr *);
 int sctp_bind_addr_copy(struct sctp_bind_addr *dest,
                const struct sctp_bind_addr *src,
-                sctp_scope_t scope, unsigned int __nocast gfp,
+                sctp_scope_t scope, gfp_t gfp,
                int flags);
 int sctp_add_bind_addr(struct sctp_bind_addr *, union sctp_addr *,
-                unsigned int __nocast gfp);
+                gfp_t gfp);
 int sctp_del_bind_addr(struct sctp_bind_addr *, union sctp_addr *);
 int sctp_bind_addr_match(struct sctp_bind_addr *, const union sctp_addr *,
                struct sctp_sock *);
@@ -1108,9 +1108,9 @@ union sctp_addr *sctp_find_unmatch_addr(struct sctp_bind_addr *bp,
                struct sctp_sock *opt);
 union sctp_params sctp_bind_addrs_to_raw(const struct sctp_bind_addr *bp,
                int *addrs_len,
-                unsigned int __nocast gfp);
+                gfp_t gfp);
 int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw, int len,
-                __u16 port, unsigned int __nocast gfp);
+                __u16 port, gfp_t gfp);
 
 sctp_scope_t sctp_scope(const union sctp_addr *);
 int sctp_in_scope(const union sctp_addr *addr, const sctp_scope_t scope);
@@ -1239,7 +1239,7 @@ static inline struct sctp_endpoint *sctp_ep(struct sctp_ep_common *base)
 }
 
 /* These are function signatures for manipulating endpoints. */
-struct sctp_endpoint *sctp_endpoint_new(struct sock *, unsigned int __nocast);
+struct sctp_endpoint *sctp_endpoint_new(struct sock *, gfp_t);
 void sctp_endpoint_free(struct sctp_endpoint *);
 void sctp_endpoint_put(struct sctp_endpoint *);
 void sctp_endpoint_hold(struct sctp_endpoint *);
@@ -1260,7 +1260,7 @@ int sctp_verify_init(const struct sctp_association *asoc, sctp_cid_t,
                struct sctp_chunk **err_chunk);
 int sctp_process_init(struct sctp_association *, sctp_cid_t cid,
                const union sctp_addr *peer,
-                sctp_init_chunk_t *init, unsigned int __nocast gfp);
+                sctp_init_chunk_t *init, gfp_t gfp);
 __u32 sctp_generate_tag(const struct sctp_endpoint *);
 __u32 sctp_generate_tsn(const struct sctp_endpoint *);
 
@@ -1723,7 +1723,7 @@ static inline struct sctp_association *sctp_assoc(struct sctp_ep_common *base)
 
 struct sctp_association *
 sctp_association_new(const struct sctp_endpoint *, const struct sock *,
-                sctp_scope_t scope, unsigned int __nocast gfp);
+                sctp_scope_t scope, gfp_t gfp);
 void sctp_association_free(struct sctp_association *);
 void sctp_association_put(struct sctp_association *);
 void sctp_association_hold(struct sctp_association *);
@@ -1739,7 +1739,7 @@ int sctp_assoc_lookup_laddr(struct sctp_association *asoc,
                const union sctp_addr *laddr);
 struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *,
                const union sctp_addr *address,
-                const unsigned int __nocast gfp,
+                const gfp_t gfp,
                const int peer_state);
 void sctp_assoc_del_peer(struct sctp_association *asoc,
                const union sctp_addr *addr);
@@ -1764,10 +1764,10 @@ void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned);
 void sctp_assoc_set_primary(struct sctp_association *,
                struct sctp_transport *);
 int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *,
-                unsigned int __nocast);
+                gfp_t);
 int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *,
                struct sctp_cookie*,
-                unsigned int __nocast gfp);
+                gfp_t gfp);
 
 int sctp_cmp_addr_exact(const union sctp_addr *ss1,
                const union sctp_addr *ss2);
@@ -88,7 +88,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_assoc_change(
        __u16 error,
        __u16 outbound,
        __u16 inbound,
-        unsigned int __nocast gfp);
+        gfp_t gfp);
 
 struct sctp_ulpevent *sctp_ulpevent_make_peer_addr_change(
        const struct sctp_association *asoc,
@@ -96,35 +96,35 @@ struct sctp_ulpevent *sctp_ulpevent_make_peer_addr_change(
        int flags,
        int state,
        int error,
-        unsigned int __nocast gfp);
+        gfp_t gfp);
 
 struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
        const struct sctp_association *asoc,
        struct sctp_chunk *chunk,
        __u16 flags,
-        unsigned int __nocast gfp);
+        gfp_t gfp);
 struct sctp_ulpevent *sctp_ulpevent_make_send_failed(
        const struct sctp_association *asoc,
        struct sctp_chunk *chunk,
        __u16 flags,
        __u32 error,
-        unsigned int __nocast gfp);
+        gfp_t gfp);
 
 struct sctp_ulpevent *sctp_ulpevent_make_shutdown_event(
        const struct sctp_association *asoc,
        __u16 flags,
-        unsigned int __nocast gfp);
+        gfp_t gfp);
 
 struct sctp_ulpevent *sctp_ulpevent_make_pdapi(
        const struct sctp_association *asoc,
-        __u32 indication, unsigned int __nocast gfp);
+        __u32 indication, gfp_t gfp);
 
 struct sctp_ulpevent *sctp_ulpevent_make_adaption_indication(
-        const struct sctp_association *asoc, unsigned int __nocast gfp);
+        const struct sctp_association *asoc, gfp_t gfp);
 
 struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
        struct sctp_chunk *chunk,
-        unsigned int __nocast gfp);
+        gfp_t gfp);
 
 void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
        struct msghdr *);
@@ -62,22 +62,19 @@ struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *,
 void sctp_ulpq_free(struct sctp_ulpq *);
 
 /* Add a new DATA chunk for processing. */
-int sctp_ulpq_tail_data(struct sctp_ulpq *, struct sctp_chunk *,
-                unsigned int __nocast);
+int sctp_ulpq_tail_data(struct sctp_ulpq *, struct sctp_chunk *, gfp_t);
 
 /* Add a new event for propagation to the ULP. */
 int sctp_ulpq_tail_event(struct sctp_ulpq *, struct sctp_ulpevent *ev);
 
 /* Renege previously received chunks. */
-void sctp_ulpq_renege(struct sctp_ulpq *, struct sctp_chunk *,
-                unsigned int __nocast);
+void sctp_ulpq_renege(struct sctp_ulpq *, struct sctp_chunk *, gfp_t);
 
 /* Perform partial delivery. */
-void sctp_ulpq_partial_delivery(struct sctp_ulpq *, struct sctp_chunk *,
-                unsigned int __nocast);
+void sctp_ulpq_partial_delivery(struct sctp_ulpq *, struct sctp_chunk *, gfp_t);
 
 /* Abort the partial delivery. */
-void sctp_ulpq_abort_pd(struct sctp_ulpq *, unsigned int __nocast);
+void sctp_ulpq_abort_pd(struct sctp_ulpq *, gfp_t);
 
 /* Clear the partial data delivery condition on this socket. */
 int sctp_clear_pd(struct sock *sk);
@@ -739,18 +739,18 @@ extern void FASTCALL(release_sock(struct sock *sk));
#define bh_unlock_sock(__sk) spin_unlock(&((__sk)->sk_lock.slock))

extern struct sock *sk_alloc(int family,
-        unsigned int __nocast priority,
+        gfp_t priority,
        struct proto *prot, int zero_it);
extern void sk_free(struct sock *sk);
extern struct sock *sk_clone(const struct sock *sk,
-        const unsigned int __nocast priority);
+        const gfp_t priority);

extern struct sk_buff *sock_wmalloc(struct sock *sk,
        unsigned long size, int force,
-        unsigned int __nocast priority);
+        gfp_t priority);
extern struct sk_buff *sock_rmalloc(struct sock *sk,
        unsigned long size, int force,
-        unsigned int __nocast priority);
+        gfp_t priority);
extern void sock_wfree(struct sk_buff *skb);
extern void sock_rfree(struct sk_buff *skb);

@@ -766,7 +766,7 @@ extern struct sk_buff *sock_alloc_send_skb(struct sock *sk,
        int noblock,
        int *errcode);
extern void *sock_kmalloc(struct sock *sk, int size,
-        unsigned int __nocast priority);
+        gfp_t priority);
extern void sock_kfree_s(struct sock *sk, void *mem, int size);
extern void sk_send_sigurg(struct sock *sk);

@@ -1201,7 +1201,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk,
        int size, int mem,
-        unsigned int __nocast gfp)
+        gfp_t gfp)
{
        struct sk_buff *skb;
        int hdr_len;

@@ -1224,7 +1224,7 @@ static inline struct sk_buff *sk_stream_alloc_pskb(struct sock *sk,
static inline struct sk_buff *sk_stream_alloc_skb(struct sock *sk,
        int size,
-        unsigned int __nocast gfp)
+        gfp_t gfp)
{
        return sk_stream_alloc_pskb(sk, size, 0, gfp);
}

@@ -1255,7 +1255,7 @@ static inline int sock_writeable(const struct sock *sk)
        return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf / 2);
}

-static inline unsigned int __nocast gfp_any(void)
+static inline gfp_t gfp_any(void)
{
        return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
}
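The gfp_any() hunk shows the idea in miniature: the helper returns GFP_ATOMIC in softirq context and GFP_KERNEL otherwise, and once it is typed gfp_t, sparse can follow the flags from producer to consumer. Below is a minimal standalone sketch of the annotation pattern itself; toy_alloc(), my_gfp_t, and the flag values are hypothetical stand-ins, not kernel code, and the attribute only bites when sparse defines __CHECKER__.

    #include <stdio.h>

    #ifdef __CHECKER__
    # define __nocast __attribute__((nocast))  /* sparse-only annotation */
    #else
    # define __nocast                          /* expands to nothing for gcc */
    #endif

    typedef unsigned int __nocast my_gfp_t;    /* stand-in for the kernel's gfp_t */

    #define MY_GFP_KERNEL ((my_gfp_t)0x10u)    /* illustrative values only */
    #define MY_GFP_ATOMIC ((my_gfp_t)0x20u)

    /* Hypothetical allocator: the typed parameter documents that the caller
     * must pass allocation-context flags rather than an arbitrary int. */
    static void *toy_alloc(unsigned long size, my_gfp_t flags)
    {
            printf("allocating %lu bytes, flags %#x\n", size, (unsigned int)flags);
            return NULL;                       /* placeholder, no real allocation */
    }

    int main(void)
    {
            toy_alloc(64, MY_GFP_KERNEL);      /* fine: a typed flag */
            /* toy_alloc(64, 16); -- sparse could flag a bare integer here */
            return 0;
    }

Under plain gcc the attribute vanishes, so the sketch compiles and runs as ordinary C.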
@@ -460,8 +460,7 @@ extern void tcp_send_probe0(struct sock *);
extern void tcp_send_partial(struct sock *);
extern int tcp_write_wakeup(struct sock *);
extern void tcp_send_fin(struct sock *sk);
-extern void tcp_send_active_reset(struct sock *sk,
-        unsigned int __nocast priority);
+extern void tcp_send_active_reset(struct sock *sk, gfp_t priority);
extern int tcp_send_synack(struct sock *);
extern void tcp_push_one(struct sock *, unsigned int mss_now);
extern void tcp_send_ack(struct sock *sk);

@@ -875,7 +875,7 @@ static inline int xfrm_dst_lookup(struct xfrm_dst **dst, struct flowi *fl, unsig
}
#endif

-struct xfrm_policy *xfrm_policy_alloc(unsigned int __nocast gfp);
+struct xfrm_policy *xfrm_policy_alloc(gfp_t gfp);
extern int xfrm_policy_walk(int (*func)(struct xfrm_policy *, int, int, void*), void *);
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
struct xfrm_policy *xfrm_policy_bysel(int dir, struct xfrm_selector *sel,
@@ -596,7 +596,7 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
        u32 remote_qpn, u16 pkey_index,
        struct ib_ah *ah, int rmpp_active,
        int hdr_len, int data_len,
-        unsigned int __nocast gfp_mask);
+        gfp_t gfp_mask);

/**
 * ib_free_send_mad - Returns data buffers used to send a MAD.

@@ -285,7 +285,7 @@ void ib_sa_cancel_query(int id, struct ib_sa_query *query);
int ib_sa_path_rec_get(struct ib_device *device, u8 port_num,
        struct ib_sa_path_rec *rec,
        ib_sa_comp_mask comp_mask,
-        int timeout_ms, unsigned int __nocast gfp_mask,
+        int timeout_ms, gfp_t gfp_mask,
        void (*callback)(int status,
                struct ib_sa_path_rec *resp,
                void *context),

@@ -296,7 +296,7 @@ int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num,
        u8 method,
        struct ib_sa_mcmember_rec *rec,
        ib_sa_comp_mask comp_mask,
-        int timeout_ms, unsigned int __nocast gfp_mask,
+        int timeout_ms, gfp_t gfp_mask,
        void (*callback)(int status,
                struct ib_sa_mcmember_rec *resp,
                void *context),

@@ -307,7 +307,7 @@ int ib_sa_service_rec_query(struct ib_device *device, u8 port_num,
        u8 method,
        struct ib_sa_service_rec *rec,
        ib_sa_comp_mask comp_mask,
-        int timeout_ms, unsigned int __nocast gfp_mask,
+        int timeout_ms, gfp_t gfp_mask,
        void (*callback)(int status,
                struct ib_sa_service_rec *resp,
                void *context),

@@ -342,7 +342,7 @@ static inline int
ib_sa_mcmember_rec_set(struct ib_device *device, u8 port_num,
        struct ib_sa_mcmember_rec *rec,
        ib_sa_comp_mask comp_mask,
-        int timeout_ms, unsigned int __nocast gfp_mask,
+        int timeout_ms, gfp_t gfp_mask,
        void (*callback)(int status,
                struct ib_sa_mcmember_rec *resp,
                void *context),

@@ -384,7 +384,7 @@ static inline int
ib_sa_mcmember_rec_delete(struct ib_device *device, u8 port_num,
        struct ib_sa_mcmember_rec *rec,
        ib_sa_comp_mask comp_mask,
-        int timeout_ms, unsigned int __nocast gfp_mask,
+        int timeout_ms, gfp_t gfp_mask,
        void (*callback)(int status,
                struct ib_sa_mcmember_rec *resp,
                void *context),
@@ -203,7 +203,7 @@ extern int rxrpc_call_write_data(struct rxrpc_call *call,
        size_t sioc,
        struct kvec *siov,
        uint8_t rxhdr_flags,
-        unsigned int __nocast alloc_flags,
+        gfp_t alloc_flags,
        int dup_data,
        size_t *size_sent);

@@ -63,7 +63,7 @@ extern int rxrpc_conn_newmsg(struct rxrpc_connection *conn,
        uint8_t type,
        int count,
        struct kvec *diov,
-        unsigned int __nocast alloc_flags,
+        gfp_t alloc_flags,
        struct rxrpc_message **_msg);

extern int rxrpc_conn_sendmsg(struct rxrpc_connection *conn, struct rxrpc_message *msg);
@@ -290,13 +290,13 @@ void snd_memory_init(void);
void snd_memory_done(void);
int snd_memory_info_init(void);
int snd_memory_info_done(void);
-void *snd_hidden_kmalloc(size_t size, unsigned int __nocast flags);
-void *snd_hidden_kzalloc(size_t size, unsigned int __nocast flags);
-void *snd_hidden_kcalloc(size_t n, size_t size, unsigned int __nocast flags);
+void *snd_hidden_kmalloc(size_t size, gfp_t flags);
+void *snd_hidden_kzalloc(size_t size, gfp_t flags);
+void *snd_hidden_kcalloc(size_t n, size_t size, gfp_t flags);
void snd_hidden_kfree(const void *obj);
void *snd_hidden_vmalloc(unsigned long size);
void snd_hidden_vfree(void *obj);
-char *snd_hidden_kstrdup(const char *s, unsigned int __nocast flags);
+char *snd_hidden_kstrdup(const char *s, gfp_t flags);
#define kmalloc(size, flags) snd_hidden_kmalloc(size, flags)
#define kzalloc(size, flags) snd_hidden_kzalloc(size, flags)
#define kcalloc(n, size, flags) snd_hidden_kcalloc(n, size, flags)
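The last three lines of this hunk are the interesting part: ALSA's memory debugging shadows kmalloc() and friends with macros, so every call site in the subsystem silently routes through the snd_hidden_* accounting versions, whose signatures must therefore track the kernel's, gfp_t included. A standalone sketch of the same name-shadowing trick, using a hypothetical my_debug_malloc() around the C library allocator:

    #include <stdio.h>
    #include <stdlib.h>

    static size_t total_allocated;             /* simple accounting state */

    /* Wrapper with the same shape as the wrapped allocator; the real
     * malloc is still visible here, before the macro below takes effect. */
    static void *my_debug_malloc(size_t size)
    {
            total_allocated += size;
            return malloc(size);
    }

    /* Shadow the real name: every later call site is redirected. */
    #define malloc(size) my_debug_malloc(size)

    int main(void)
    {
            void *p = malloc(32);              /* actually my_debug_malloc(32) */
            printf("allocated so far: %zu bytes\n", total_allocated);
            free(p);
            return 0;
    }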
@@ -51,7 +51,7 @@
#ifdef CONFIG_SND_DEBUG_MEMORY
#include <linux/slab.h>
#include <linux/vmalloc.h>
-void *snd_wrapper_kmalloc(size_t, unsigned int __nocast);
+void *snd_wrapper_kmalloc(size_t, gfp_t);
#undef kmalloc
void snd_wrapper_kfree(const void *);
#undef kfree
@@ -560,7 +560,7 @@ static void audit_buffer_free(struct audit_buffer *ab)
}

static struct audit_buffer * audit_buffer_alloc(struct audit_context *ctx,
-        unsigned int __nocast gfp_mask, int type)
+        gfp_t gfp_mask, int type)
{
        unsigned long flags;
        struct audit_buffer *ab = NULL;

@@ -1670,7 +1670,7 @@ static const struct cpuset *nearest_exclusive_ancestor(const struct cpuset *cs)
 * GFP_USER - only nodes in current tasks mems allowed ok.
 **/

-int cpuset_zone_allowed(struct zone *z, unsigned int __nocast gfp_mask)
+int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
        int node; /* node that zone z is on */
        const struct cpuset *cs; /* current cpuset ancestors */
@@ -36,7 +36,7 @@
 * struct kfifo with kfree().
 */
struct kfifo *kfifo_init(unsigned char *buffer, unsigned int size,
-        unsigned int __nocast gfp_mask, spinlock_t *lock)
+        gfp_t gfp_mask, spinlock_t *lock)
{
        struct kfifo *fifo;

@@ -64,7 +64,7 @@ EXPORT_SYMBOL(kfifo_init);
 *
 * The size will be rounded-up to a power of 2.
 */
-struct kfifo *kfifo_alloc(unsigned int size, unsigned int __nocast gfp_mask, spinlock_t *lock)
+struct kfifo *kfifo_alloc(unsigned int size, gfp_t gfp_mask, spinlock_t *lock)
{
        unsigned char *buffer;
        struct kfifo *ret;
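kfifo_alloc() is a typical consumer of the new type: the caller hands over the allocation context together with the size, and the gfp_t travels down into the kmalloc() calls inside kfifo_init(). A kernel-style usage sketch against this era's kfifo API follows; the my_* names are hypothetical and the snippet is only buildable inside the kernel tree:

    #include <linux/kfifo.h>
    #include <linux/spinlock.h>
    #include <linux/err.h>

    static spinlock_t my_fifo_lock = SPIN_LOCK_UNLOCKED;  /* hypothetical lock */
    static struct kfifo *my_fifo;

    static int my_fifo_setup(void)
    {
            /* Size is rounded up to a power of 2; GFP_KERNEL may sleep,
             * so this must run in process context. */
            my_fifo = kfifo_alloc(128, GFP_KERNEL, &my_fifo_lock);
            if (IS_ERR(my_fifo))
                    return PTR_ERR(my_fifo);
            return 0;
    }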
@@ -262,7 +262,7 @@ next_signal(struct sigpending *pending, sigset_t *mask)
        return sig;
}

-static struct sigqueue *__sigqueue_alloc(struct task_struct *t, unsigned int __nocast flags,
+static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
        int override_rlimit)
{
        struct sigqueue *q = NULL;
@@ -110,7 +110,7 @@ radix_tree_node_free(struct radix_tree_node *node)
 * success, return zero, with preemption disabled. On error, return -ENOMEM
 * with preemption not disabled.
 */
-int radix_tree_preload(unsigned int __nocast gfp_mask)
+int radix_tree_preload(gfp_t gfp_mask)
{
        struct radix_tree_preload *rtp;
        struct radix_tree_node *node;
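The comment above states the contract: on success radix_tree_preload() returns zero with preemption disabled. The usual calling pattern, sketched here with hypothetical my_* names, is to preload with a sleeping gfp_t before taking a spinlock, so the insertion itself never has to allocate atomically:

    #include <linux/radix-tree.h>
    #include <linux/spinlock.h>
    #include <linux/errno.h>

    static RADIX_TREE(my_tree, GFP_ATOMIC);   /* hypothetical tree */
    static DEFINE_SPINLOCK(my_lock);          /* hypothetical lock */

    static int my_insert(unsigned long index, void *item)
    {
            int err;

            /* May sleep, so do it before taking the spinlock. */
            err = radix_tree_preload(GFP_KERNEL);
            if (err)
                    return err;

            spin_lock(&my_lock);
            err = radix_tree_insert(&my_tree, index, item);
            spin_unlock(&my_lock);

            radix_tree_preload_end();         /* re-enables preemption */
            return err;
    }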
@@ -127,7 +127,7 @@ static void compute_prefix_tbl(struct ts_bm *bm, const u8 *pattern,
}

static struct ts_config *bm_init(const void *pattern, unsigned int len,
-        unsigned int __nocast gfp_mask)
+        gfp_t gfp_mask)
{
        struct ts_config *conf;
        struct ts_bm *bm;

@@ -258,7 +258,7 @@ static unsigned int fsm_find(struct ts_config *conf, struct ts_state *state)
}

static struct ts_config *fsm_init(const void *pattern, unsigned int len,
-        unsigned int __nocast gfp_mask)
+        gfp_t gfp_mask)
{
        int i, err = -EINVAL;
        struct ts_config *conf;

@@ -87,7 +87,7 @@ static inline void compute_prefix_tbl(const u8 *pattern, unsigned int len,
}

static struct ts_config *kmp_init(const void *pattern, unsigned int len,
-        unsigned int __nocast gfp_mask)
+        gfp_t gfp_mask)
{
        struct ts_config *conf;
        struct ts_kmp *kmp;
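bm_init(), fsm_init() and kmp_init() are backends of the lib/textsearch frontend; callers never invoke them directly but pass the gfp_t through the prepare call. A kernel-style sketch of a one-shot search, assuming the textsearch interface of this era (names from memory, kernel-tree only; my_find() is hypothetical):

    #include <linux/textsearch.h>
    #include <linux/kernel.h>
    #include <linux/err.h>

    static int my_find(const void *data, unsigned int len)
    {
            struct ts_config *conf;
            struct ts_state state;
            unsigned int pos;

            /* The gfp_t here flows into kmp_init() above. */
            conf = textsearch_prepare("kmp", "needle", 6, GFP_KERNEL,
                                      TS_AUTOLOAD);
            if (IS_ERR(conf))
                    return PTR_ERR(conf);

            pos = textsearch_find_continuous(conf, &state, data, len);
            textsearch_destroy(conf);

            return pos != UINT_MAX;           /* UINT_MAX means no match */
    }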
@@ -30,7 +30,7 @@

static mempool_t *page_pool, *isa_page_pool;

-static void *page_pool_alloc(unsigned int __nocast gfp_mask, void *data)
+static void *page_pool_alloc(gfp_t gfp_mask, void *data)
{
        unsigned int gfp = gfp_mask | (unsigned int) (long) data;
@@ -687,7 +687,7 @@ get_vma_policy(struct task_struct *task, struct vm_area_struct *vma, unsigned long addr)
}

/* Return a zonelist representing a mempolicy */
-static struct zonelist *zonelist_policy(unsigned int __nocast gfp, struct mempolicy *policy)
+static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy)
{
        int nd;

@@ -751,7 +751,7 @@ static unsigned offset_il_node(struct mempolicy *pol,

/* Allocate a page in interleaved policy.
   Own path because it needs to do special accounting. */
-static struct page *alloc_page_interleave(unsigned int __nocast gfp, unsigned order, unsigned nid)
+static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, unsigned nid)
{
        struct zonelist *zl;
        struct page *page;

@@ -789,7 +789,7 @@ static struct page *alloc_page_interleave(unsigned int __nocast gfp, unsigned order, unsigned nid)
 * Should be called with the mm_sem of the vma hold.
 */
struct page *
-alloc_page_vma(unsigned int __nocast gfp, struct vm_area_struct *vma, unsigned long addr)
+alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
{
        struct mempolicy *pol = get_vma_policy(current, vma, addr);

@@ -832,7 +832,7 @@ alloc_page_vma(unsigned int __nocast gfp, struct vm_area_struct *vma, unsigned long addr)
 * 1) it's ok to take cpuset_sem (can WAIT), and
 * 2) allocating for current task (not interrupt).
 */
-struct page *alloc_pages_current(unsigned int __nocast gfp, unsigned order)
+struct page *alloc_pages_current(gfp_t gfp, unsigned order)
{
        struct mempolicy *pol = current->mempolicy;
@@ -112,7 +112,7 @@ EXPORT_SYMBOL(mempool_create_node);
 * while this function is running. mempool_alloc() & mempool_free()
 * might be called (eg. from IRQ contexts) while this function executes.
 */
-int mempool_resize(mempool_t *pool, int new_min_nr, unsigned int __nocast gfp_mask)
+int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask)
{
        void *element;
        void **new_elements;

@@ -200,7 +200,7 @@ EXPORT_SYMBOL(mempool_destroy);
 * *never* fails when called from process contexts. (it might
 * fail if called from an IRQ context.)
 */
-void * mempool_alloc(mempool_t *pool, unsigned int __nocast gfp_mask)
+void * mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
{
        void *element;
        unsigned long flags;

@@ -276,7 +276,7 @@ EXPORT_SYMBOL(mempool_free);
/*
 * A commonly used alloc and free fn.
 */
-void *mempool_alloc_slab(unsigned int __nocast gfp_mask, void *pool_data)
+void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
{
        kmem_cache_t *mem = (kmem_cache_t *) pool_data;
        return kmem_cache_alloc(mem, gfp_mask);
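mempool_alloc_slab() is the stock callback half of the picture: a pool built over a slab cache keeps a minimum reserve of objects so that, as the comment above notes, allocation never fails in process context. A kernel-style setup-and-use sketch with this era's API spellings (kmem_cache_t, ctor/dtor arguments); the my_* names are hypothetical:

    #include <linux/mempool.h>
    #include <linux/slab.h>
    #include <linux/errno.h>

    static kmem_cache_t *my_cache;   /* hypothetical slab cache */
    static mempool_t *my_pool;

    static int my_pool_setup(void)
    {
            my_cache = kmem_cache_create("my_objs", 128, 0, 0, NULL, NULL);
            if (!my_cache)
                    return -ENOMEM;

            /* Keep at least 4 preallocated objects in reserve. */
            my_pool = mempool_create(4, mempool_alloc_slab,
                                     mempool_free_slab, my_cache);
            if (!my_pool)
                    return -ENOMEM;
            return 0;
    }

    static void my_use(void)
    {
            /* The gfp_t picks the behavior: GFP_NOIO is common here because
             * mempools usually sit on the I/O path, where recursing back
             * into the I/O layer to reclaim memory must be avoided. */
            void *obj = mempool_alloc(my_pool, GFP_NOIO);

            /* ... use obj ... */
            mempool_free(obj, my_pool);
    }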
@@ -157,8 +157,7 @@ void vfree(void *addr)
        kfree(addr);
}

-void *__vmalloc(unsigned long size, unsigned int __nocast gfp_mask,
-        pgprot_t prot)
+void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
        /*
         * kmalloc doesn't like __GFP_HIGHMEM for some reason
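With the three-argument form, plain vmalloc() is just a thin wrapper that fixes the gfp_t and the page protection. A sketch of that wrapper shape, assuming the usual definition of the era (my_vmalloc() is a hypothetical name):

    #include <linux/vmalloc.h>
    #include <linux/mm.h>

    /* Allocate page-table-backed memory that may come from highmem,
     * mapped with normal kernel protections. */
    static void *my_vmalloc(unsigned long size)
    {
            return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
    }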
@@ -263,7 +263,7 @@ static struct mm_struct *oom_kill_process(struct task_struct *p)
 * OR try to be smart about which process to kill. Note that we
 * don't have to be perfect here, we just have to be good.
 */
-void out_of_memory(unsigned int __nocast gfp_mask, int order)
+void out_of_memory(gfp_t gfp_mask, int order)
{
        struct mm_struct *mm = NULL;
        task_t * p;
@@ -671,7 +671,7 @@ void fastcall free_cold_page(struct page *page)
        free_hot_cold_page(page, 1);
}

-static inline void prep_zero_page(struct page *page, int order, unsigned int __nocast gfp_flags)
+static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
{
        int i;

@@ -686,7 +686,7 @@ static inline void prep_zero_page(struct page *page, int order, unsigned int __nocast gfp_flags)
 * or two.
 */
static struct page *
-buffered_rmqueue(struct zone *zone, int order, unsigned int __nocast gfp_flags)
+buffered_rmqueue(struct zone *zone, int order, gfp_t gfp_flags)
{
        unsigned long flags;
        struct page *page = NULL;

@@ -761,7 +761,7 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
}

static inline int
-should_reclaim_zone(struct zone *z, unsigned int gfp_mask)
+should_reclaim_zone(struct zone *z, gfp_t gfp_mask)
{
        if (!z->reclaim_pages)
                return 0;

@@ -774,7 +774,7 @@ should_reclaim_zone(struct zone *z, unsigned int gfp_mask)
 * This is the 'heart' of the zoned buddy allocator.
 */
struct page * fastcall
-__alloc_pages(unsigned int __nocast gfp_mask, unsigned int order,
+__alloc_pages(gfp_t gfp_mask, unsigned int order,
        struct zonelist *zonelist)
{
        const int wait = gfp_mask & __GFP_WAIT;

@@ -977,7 +977,7 @@ EXPORT_SYMBOL(__alloc_pages);
/*
 * Common helper functions.
 */
-fastcall unsigned long __get_free_pages(unsigned int __nocast gfp_mask, unsigned int order)
+fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
{
        struct page * page;
        page = alloc_pages(gfp_mask, order);

@@ -988,7 +988,7 @@ fastcall unsigned long __get_free_pages(unsigned int __nocast gfp_mask, unsigned int order)
EXPORT_SYMBOL(__get_free_pages);

-fastcall unsigned long get_zeroed_page(unsigned int __nocast gfp_mask)
+fastcall unsigned long get_zeroed_page(gfp_t gfp_mask)
{
        struct page * page;
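__alloc_pages() sits at the bottom of every allocation path, and __get_free_pages()/get_zeroed_page() are the common entry points that now take a gfp_t. A kernel-style usage sketch (my_buffers() is a hypothetical caller):

    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/errno.h>

    static int my_buffers(void)
    {
            /* Four contiguous pages (order 2); GFP_KERNEL may sleep. */
            unsigned long buf = __get_free_pages(GFP_KERNEL, 2);
            /* One zero-filled page. */
            unsigned long zpage = get_zeroed_page(GFP_KERNEL);

            if (!buf || !zpage)
                    goto fail;

            /* ... use the buffers ... */

            free_pages(buf, 2);
            free_page(zpage);
            return 0;
    fail:
            if (buf)
                    free_pages(buf, 2);
            if (zpage)
                    free_page(zpage);
            return -ENOMEM;
    }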
@@ -19,7 +19,7 @@
#include <linux/writeback.h>
#include <asm/pgtable.h>

-static struct bio *get_swap_bio(unsigned int __nocast gfp_flags, pgoff_t index,
+static struct bio *get_swap_bio(gfp_t gfp_flags, pgoff_t index,
        struct page *page, bio_end_io_t end_io)
{
        struct bio *bio;

@@ -921,8 +921,7 @@ shmem_swapin(struct shmem_inode_info *info,swp_entry_t entry,unsigned long idx)
}

static inline struct page *
-shmem_alloc_page(unsigned int __nocast gfp,struct shmem_inode_info *info,
-        unsigned long idx)
+shmem_alloc_page(gfp_t gfp,struct shmem_inode_info *info, unsigned long idx)
{
        return alloc_page(gfp | __GFP_ZERO);
}