ChangeSet 1.2233, 2005/03/28 17:42:49-08:00, [EMAIL PROTECTED]
Mark "gfp" masks as "unsigned int" and use __nocast to find violations.
This makes it hard(er) to mix up the argument order by mistake for things like
kmalloc() and friends, since silent integer promotion is now caught by
sparse.
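
For illustration (not from the patch itself; "struct foo" and foo_alloc()
are made-up names), this is the class of mistake the annotation catches.
With kmalloc() declared as taking (size_t, unsigned int __nocast), a call
with the arguments swapped still compiles silently under gcc -- the mask
promotes to size_t and the size converts to the flags type -- but sparse
(make C=1) warns about the implicit cast into the __nocast parameter:

    #include <linux/slab.h>

    struct foo {
            int x;
    };

    static struct foo *foo_alloc(void)
    {
            /* Arguments swapped by mistake: gcc stays quiet, but
             * sparse now warns about the implicit cast of
             * sizeof(struct foo) into the __nocast flags argument.
             * The correct call is kmalloc(sizeof(struct foo),
             * GFP_KERNEL). */
            return kmalloc(GFP_KERNEL, sizeof(struct foo));
    }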
arch/i386/kernel/pci-dma.c | 2 -
arch/ppc64/kernel/dma.c | 2 -
arch/ppc64/kernel/iommu.c | 2 -
arch/ppc64/kernel/pci_direct_iommu.c | 2 -
arch/ppc64/kernel/pci_iommu.c | 2 -
arch/ppc64/kernel/vio.c | 2 -
drivers/base/dmapool.c | 2 -
drivers/block/pktcdvd.c | 4 +--
drivers/md/dm-crypt.c | 2 -
drivers/md/dm-io.c | 2 -
drivers/md/dm-raid1.c | 2 -
drivers/md/multipath.c | 2 -
drivers/md/raid1.c | 4 +--
drivers/md/raid10.c | 4 +--
drivers/s390/scsi/zfcp_aux.c | 2 -
fs/bio.c | 10 ++++-----
fs/buffer.c | 2 -
fs/cifs/connect.c | 2 -
fs/mpage.c | 3 +-
fs/posix_acl.c | 6 ++---
include/asm-generic/dma-mapping.h | 4 +--
include/asm-i386/dma-mapping.h | 2 -
include/asm-ppc64/dma-mapping.h | 4 +--
include/asm-ppc64/iommu.h | 2 -
include/linux/bio.h | 6 ++---
include/linux/blkdev.h | 2 -
include/linux/buffer_head.h | 2 -
include/linux/gfp.h | 36 +++++++++++++++++------------------
include/linux/jbd.h | 2 -
include/linux/kfifo.h | 4 +--
include/linux/mempool.h | 8 +++----
include/linux/pagemap.h | 2 -
include/linux/posix_acl.h | 6 ++---
include/linux/slab.h | 8 +++----
include/linux/swap.h | 2 -
include/linux/vmalloc.h | 4 +--
kernel/kfifo.c | 4 +--
kernel/signal.c | 2 -
mm/filemap.c | 2 -
mm/highmem.c | 4 +--
mm/mempolicy.c | 8 +++----
mm/mempool.c | 6 ++---
mm/oom_kill.c | 2 -
mm/page_alloc.c | 10 ++++-----
mm/page_io.c | 2 -
mm/shmem.c | 2 -
mm/slab.c | 22 ++++++++++-----------
mm/vmalloc.c | 4 +--
48 files changed, 111 insertions(+), 110 deletions(-)
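
For reference, __nocast itself is not introduced by this changeset; it
comes from include/linux/compiler.h, where (roughly, quoting from memory)
it expands to a sparse attribute when checking and to nothing for gcc:

    #ifdef __CHECKER__
    # define __nocast __attribute__((nocast))
    #else
    # define __nocast
    #endif

So the annotation costs nothing in a normal build and only takes effect
when the tree is run through sparse (make C=1).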
diff -Nru a/arch/i386/kernel/pci-dma.c b/arch/i386/kernel/pci-dma.c
--- a/arch/i386/kernel/pci-dma.c 2005-03-28 21:06:08 -08:00
+++ b/arch/i386/kernel/pci-dma.c 2005-03-28 21:06:08 -08:00
@@ -22,7 +22,7 @@
};
void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, int gfp)
+ dma_addr_t *dma_handle, unsigned int __nocast gfp)
{
void *ret;
struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
diff -Nru a/arch/ppc64/kernel/dma.c b/arch/ppc64/kernel/dma.c
--- a/arch/ppc64/kernel/dma.c 2005-03-28 21:06:08 -08:00
+++ b/arch/ppc64/kernel/dma.c 2005-03-28 21:06:08 -08:00
@@ -49,7 +49,7 @@
EXPORT_SYMBOL(dma_set_mask);
void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, int flag)
+ dma_addr_t *dma_handle, unsigned int __nocast flag)
{
struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
diff -Nru a/arch/ppc64/kernel/iommu.c b/arch/ppc64/kernel/iommu.c
--- a/arch/ppc64/kernel/iommu.c 2005-03-28 21:06:08 -08:00
+++ b/arch/ppc64/kernel/iommu.c 2005-03-28 21:06:08 -08:00
@@ -514,7 +514,7 @@
* to the dma address (mapping) of the first page.
*/
void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
- dma_addr_t *dma_handle, int flag)
+ dma_addr_t *dma_handle, unsigned int __nocast flag)
{
void *ret = NULL;
dma_addr_t mapping;
diff -Nru a/arch/ppc64/kernel/pci_direct_iommu.c b/arch/ppc64/kernel/pci_direct_iommu.c
--- a/arch/ppc64/kernel/pci_direct_iommu.c 2005-03-28 21:06:08 -08:00
+++ b/arch/ppc64/kernel/pci_direct_iommu.c 2005-03-28 21:06:08 -08:00
@@ -31,7 +31,7 @@
#include "pci.h"
static void *pci_direct_alloc_coherent(struct device *hwdev, size_t size,
- dma_addr_t *dma_handle, int flag)
+ dma_addr_t *dma_handle, unsigned int __nocast flag)
{
void *ret;
diff -Nru a/arch/ppc64/kernel/pci_iommu.c b/arch/ppc64/kernel/pci_iommu.c
--- a/arch/ppc64/kernel/pci_iommu.c 2005-03-28 21:06:08 -08:00
+++ b/arch/ppc64/kernel/pci_iommu.c 2005-03-28 21:06:08 -08:00
@@ -76,7 +76,7 @@
* to the dma address (mapping) of the first page.
*/
static void *pci_iommu_alloc_coherent(struct device *hwdev, size_t size,
- dma_addr_t *dma_handle, int flag)
+ dma_addr_t *dma_handle, unsigned int __nocast flag)
{
return iommu_alloc_coherent(devnode_table(hwdev), size, dma_handle,
flag);
diff -Nru a/arch/ppc64/kernel/vio.c b/arch/ppc64/kernel/vio.c
--- a/arch/ppc64/kernel/vio.c 2005-03-28 21:06:08 -08:00
+++ b/arch/ppc64/kernel/vio.c 2005-03-28 21:06:08 -08:00
@@ -585,7 +585,7 @@
}
static void *vio_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, int flag)
+ dma_addr_t *dma_handle, unsigned int __nocast flag)
{
return iommu_alloc_coherent(to_vio_dev(dev)->iommu_table, size,
dma_handle, flag);
diff -Nru a/drivers/base/dmapool.c b/drivers/base/dmapool.c
--- a/drivers/base/dmapool.c 2005-03-28 21:06:08 -08:00
+++ b/drivers/base/dmapool.c 2005-03-28 21:06:08 -08:00
@@ -156,7 +156,7 @@
static struct dma_page *
-pool_alloc_page (struct dma_pool *pool, int mem_flags)
+pool_alloc_page (struct dma_pool *pool, unsigned int __nocast mem_flags)
{
struct dma_page *page;
int mapsize;
diff -Nru a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
--- a/drivers/block/pktcdvd.c 2005-03-28 21:06:08 -08:00
+++ b/drivers/block/pktcdvd.c 2005-03-28 21:06:08 -08:00
@@ -219,7 +219,7 @@
return 1;
}
-static void *pkt_rb_alloc(int gfp_mask, void *data)
+static void *pkt_rb_alloc(unsigned int __nocast gfp_mask, void *data)
{
return kmalloc(sizeof(struct pkt_rb_node), gfp_mask);
}
@@ -2054,7 +2054,7 @@
}
-static void *psd_pool_alloc(int gfp_mask, void *data)
+static void *psd_pool_alloc(unsigned int __nocast gfp_mask, void *data)
{
return kmalloc(sizeof(struct packet_stacked_data), gfp_mask);
}
diff -Nru a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
--- a/drivers/md/dm-crypt.c 2005-03-28 21:06:08 -08:00
+++ b/drivers/md/dm-crypt.c 2005-03-28 21:06:08 -08:00
@@ -96,7 +96,7 @@
/*
* Mempool alloc and free functions for the page
*/
-static void *mempool_alloc_page(int gfp_mask, void *data)
+static void *mempool_alloc_page(unsigned int __nocast gfp_mask, void *data)
{
return alloc_page(gfp_mask);
}
diff -Nru a/drivers/md/dm-io.c b/drivers/md/dm-io.c
--- a/drivers/md/dm-io.c 2005-03-28 21:06:08 -08:00
+++ b/drivers/md/dm-io.c 2005-03-28 21:06:08 -08:00
@@ -32,7 +32,7 @@
static unsigned _num_ios;
static mempool_t *_io_pool;
-static void *alloc_io(int gfp_mask, void *pool_data)
+static void *alloc_io(unsigned int __nocast gfp_mask, void *pool_data)
{
return kmalloc(sizeof(struct io), gfp_mask);
}
diff -Nru a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
--- a/drivers/md/dm-raid1.c 2005-03-28 21:06:08 -08:00
+++ b/drivers/md/dm-raid1.c 2005-03-28 21:06:08 -08:00
@@ -122,7 +122,7 @@
/* FIXME move this */
static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw);
-static void *region_alloc(int gfp_mask, void *pool_data)
+static void *region_alloc(unsigned int __nocast gfp_mask, void *pool_data)
{
return kmalloc(sizeof(struct region), gfp_mask);
}
diff -Nru a/drivers/md/multipath.c b/drivers/md/multipath.c
--- a/drivers/md/multipath.c 2005-03-28 21:06:08 -08:00
+++ b/drivers/md/multipath.c 2005-03-28 21:06:08 -08:00
@@ -38,7 +38,7 @@
static mdk_personality_t multipath_personality;
-static void *mp_pool_alloc(int gfp_flags, void *data)
+static void *mp_pool_alloc(unsigned int __nocast gfp_flags, void *data)
{
struct multipath_bh *mpb;
mpb = kmalloc(sizeof(*mpb), gfp_flags);
diff -Nru a/drivers/md/raid1.c b/drivers/md/raid1.c
--- a/drivers/md/raid1.c 2005-03-28 21:06:08 -08:00
+++ b/drivers/md/raid1.c 2005-03-28 21:06:08 -08:00
@@ -34,7 +34,7 @@
static void unplug_slaves(mddev_t *mddev);
-static void * r1bio_pool_alloc(int gfp_flags, void *data)
+static void * r1bio_pool_alloc(unsigned int __nocast gfp_flags, void *data)
{
struct pool_info *pi = data;
r1bio_t *r1_bio;
@@ -61,7 +61,7 @@
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
#define RESYNC_WINDOW (2048*1024)
-static void * r1buf_pool_alloc(int gfp_flags, void *data)
+static void * r1buf_pool_alloc(unsigned int __nocast gfp_flags, void *data)
{
struct pool_info *pi = data;
struct page *page;
diff -Nru a/drivers/md/raid10.c b/drivers/md/raid10.c
--- a/drivers/md/raid10.c 2005-03-28 21:06:08 -08:00
+++ b/drivers/md/raid10.c 2005-03-28 21:06:08 -08:00
@@ -47,7 +47,7 @@
static void unplug_slaves(mddev_t *mddev);
-static void * r10bio_pool_alloc(int gfp_flags, void *data)
+static void * r10bio_pool_alloc(unsigned int __nocast gfp_flags, void *data)
{
conf_t *conf = data;
r10bio_t *r10_bio;
@@ -81,7 +81,7 @@
* one for write (we recover only one drive per r10buf)
*
*/
-static void * r10buf_pool_alloc(int gfp_flags, void *data)
+static void * r10buf_pool_alloc(unsigned int __nocast gfp_flags, void *data)
{
conf_t *conf = data;
struct page *page;
diff -Nru a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
--- a/drivers/s390/scsi/zfcp_aux.c 2005-03-28 21:06:08 -08:00
+++ b/drivers/s390/scsi/zfcp_aux.c 2005-03-28 21:06:08 -08:00
@@ -928,7 +928,7 @@
}
static void *
-zfcp_mempool_alloc(int gfp_mask, void *size)
+zfcp_mempool_alloc(unsigned int __nocast gfp_mask, void *size)
{
return kmalloc((size_t) size, gfp_mask);
}
diff -Nru a/fs/bio.c b/fs/bio.c
--- a/fs/bio.c 2005-03-28 21:06:08 -08:00
+++ b/fs/bio.c 2005-03-28 21:06:08 -08:00
@@ -74,7 +74,7 @@
*/
static struct bio_set *fs_bio_set;
-static inline struct bio_vec *bvec_alloc_bs(int gfp_mask, int nr, unsigned long *idx, struct bio_set *bs)
+static inline struct bio_vec *bvec_alloc_bs(unsigned int __nocast gfp_mask, int nr, unsigned long *idx, struct bio_set *bs)
{
struct bio_vec *bvl;
struct biovec_slab *bp;
@@ -149,7 +149,7 @@
* allocate bio and iovecs from the memory pools specified by the
* bio_set structure.
**/
-struct bio *bio_alloc_bioset(int gfp_mask, int nr_iovecs, struct bio_set *bs)
+struct bio *bio_alloc_bioset(unsigned int __nocast gfp_mask, int nr_iovecs, struct bio_set *bs)
{
struct bio *bio = mempool_alloc(bs->bio_pool, gfp_mask);
@@ -177,7 +177,7 @@
return bio;
}
-struct bio *bio_alloc(int gfp_mask, int nr_iovecs)
+struct bio *bio_alloc(unsigned int __nocast gfp_mask, int nr_iovecs)
{
return bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
}
@@ -271,7 +271,7 @@
*
* Like __bio_clone, only also allocates the returned bio
*/
-struct bio *bio_clone(struct bio *bio, int gfp_mask)
+struct bio *bio_clone(struct bio *bio, unsigned int __nocast gfp_mask)
{
struct bio *b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs,
fs_bio_set);
@@ -934,7 +934,7 @@
return bp;
}
-static void *bio_pair_alloc(int gfp_flags, void *data)
+static void *bio_pair_alloc(unsigned int __nocast gfp_flags, void *data)
{
return kmalloc(sizeof(struct bio_pair), gfp_flags);
}
diff -Nru a/fs/buffer.c b/fs/buffer.c
--- a/fs/buffer.c 2005-03-28 21:06:08 -08:00
+++ b/fs/buffer.c 2005-03-28 21:06:08 -08:00
@@ -3052,7 +3052,7 @@
buffer_heads_over_limit = (tot > max_buffer_heads);
}
-struct buffer_head *alloc_buffer_head(int gfp_flags)
+struct buffer_head *alloc_buffer_head(unsigned int __nocast gfp_flags)
{
struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
if (ret) {
diff -Nru a/fs/cifs/connect.c b/fs/cifs/connect.c
--- a/fs/cifs/connect.c 2005-03-28 21:06:08 -08:00
+++ b/fs/cifs/connect.c 2005-03-28 21:06:08 -08:00
@@ -470,7 +470,7 @@
}
static void *
-cifs_kcalloc(size_t size, int type)
+cifs_kcalloc(size_t size, unsigned int __nocast type)
{
void *addr;
addr = kmalloc(size, type);
diff -Nru a/fs/mpage.c b/fs/mpage.c
--- a/fs/mpage.c 2005-03-28 21:06:08 -08:00
+++ b/fs/mpage.c 2005-03-28 21:06:08 -08:00
@@ -98,7 +98,8 @@
static struct bio *
mpage_alloc(struct block_device *bdev,
- sector_t first_sector, int nr_vecs, int gfp_flags)
+ sector_t first_sector, int nr_vecs,
+ unsigned int __nocast gfp_flags)
{
struct bio *bio;
diff -Nru a/fs/posix_acl.c b/fs/posix_acl.c
--- a/fs/posix_acl.c 2005-03-28 21:06:08 -08:00
+++ b/fs/posix_acl.c 2005-03-28 21:06:08 -08:00
@@ -35,7 +35,7 @@
* Allocate a new ACL with the specified number of entries.
*/
struct posix_acl *
-posix_acl_alloc(int count, int flags)
+posix_acl_alloc(int count, unsigned int __nocast flags)
{
const size_t size = sizeof(struct posix_acl) +
count * sizeof(struct posix_acl_entry);
@@ -51,7 +51,7 @@
* Clone an ACL.
*/
struct posix_acl *
-posix_acl_clone(const struct posix_acl *acl, int flags)
+posix_acl_clone(const struct posix_acl *acl, unsigned int __nocast flags)
{
struct posix_acl *clone = NULL;
@@ -185,7 +185,7 @@
* Create an ACL representing the file mode permission bits of an inode.
*/
struct posix_acl *
-posix_acl_from_mode(mode_t mode, int flags)
+posix_acl_from_mode(mode_t mode, unsigned int __nocast flags)
{
struct posix_acl *acl = posix_acl_alloc(3, flags);
if (!acl)
diff -Nru a/include/asm-generic/dma-mapping.h b/include/asm-generic/dma-mapping.h
--- a/include/asm-generic/dma-mapping.h 2005-03-28 21:06:08 -08:00
+++ b/include/asm-generic/dma-mapping.h 2005-03-28 21:06:08 -08:00
@@ -35,7 +35,7 @@
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
- int flag)
+ unsigned int __nocast flag)
{
BUG_ON(dev->bus != &pci_bus_type);
@@ -168,7 +168,7 @@
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
- int flag)
+ unsigned int __nocast flag)
{
BUG();
return NULL;
diff -Nru a/include/asm-i386/dma-mapping.h b/include/asm-i386/dma-mapping.h
--- a/include/asm-i386/dma-mapping.h 2005-03-28 21:06:08 -08:00
+++ b/include/asm-i386/dma-mapping.h 2005-03-28 21:06:08 -08:00
@@ -11,7 +11,7 @@
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, int flag);
+ dma_addr_t *dma_handle, unsigned int __nocast flag);
void dma_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle);
diff -Nru a/include/asm-ppc64/dma-mapping.h b/include/asm-ppc64/dma-mapping.h
--- a/include/asm-ppc64/dma-mapping.h 2005-03-28 21:06:08 -08:00
+++ b/include/asm-ppc64/dma-mapping.h 2005-03-28 21:06:08 -08:00
@@ -19,7 +19,7 @@
extern int dma_supported(struct device *dev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 dma_mask);
extern void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, int flag);
+ dma_addr_t *dma_handle, unsigned int __nocast flag);
extern void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_handle);
extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
@@ -118,7 +118,7 @@
*/
struct dma_mapping_ops {
void * (*alloc_coherent)(struct device *dev, size_t size,
- dma_addr_t *dma_handle, int flag);
+ dma_addr_t *dma_handle, unsigned int __nocast flag);
void (*free_coherent)(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle);
dma_addr_t (*map_single)(struct device *dev, void *ptr,
diff -Nru a/include/asm-ppc64/iommu.h b/include/asm-ppc64/iommu.h
--- a/include/asm-ppc64/iommu.h 2005-03-28 21:06:08 -08:00
+++ b/include/asm-ppc64/iommu.h 2005-03-28 21:06:08 -08:00
@@ -146,7 +146,7 @@
int nelems, enum dma_data_direction direction);
extern void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
- dma_addr_t *dma_handle, int flag);
+ dma_addr_t *dma_handle, unsigned int __nocast flag);
extern void iommu_free_coherent(struct iommu_table *tbl, size_t size,
void *vaddr, dma_addr_t dma_handle);
extern dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
diff -Nru a/include/linux/bio.h b/include/linux/bio.h
--- a/include/linux/bio.h 2005-03-28 21:06:08 -08:00
+++ b/include/linux/bio.h 2005-03-28 21:06:08 -08:00
@@ -263,8 +263,8 @@
extern struct bio_set *bioset_create(int, int, int);
extern void bioset_free(struct bio_set *);
-extern struct bio *bio_alloc(int, int);
-extern struct bio *bio_alloc_bioset(int, int, struct bio_set *);
+extern struct bio *bio_alloc(unsigned int __nocast, int);
+extern struct bio *bio_alloc_bioset(unsigned int __nocast, int, struct bio_set *);
extern void bio_put(struct bio *);
extern void bio_endio(struct bio *, unsigned int, int);
@@ -273,7 +273,7 @@
extern int bio_hw_segments(struct request_queue *, struct bio *);
extern void __bio_clone(struct bio *, struct bio *);
-extern struct bio *bio_clone(struct bio *, int);
+extern struct bio *bio_clone(struct bio *, unsigned int __nocast);
extern void bio_init(struct bio *);
diff -Nru a/include/linux/blkdev.h b/include/linux/blkdev.h
--- a/include/linux/blkdev.h 2005-03-28 21:06:08 -08:00
+++ b/include/linux/blkdev.h 2005-03-28 21:06:08 -08:00
@@ -347,7 +347,7 @@
* queue needs bounce pages for pages above this limit
*/
unsigned long bounce_pfn;
- int bounce_gfp;
+ unsigned int bounce_gfp;
/*
* various queue flags, see QUEUE_* below
diff -Nru a/include/linux/buffer_head.h b/include/linux/buffer_head.h
--- a/include/linux/buffer_head.h 2005-03-28 21:06:08 -08:00
+++ b/include/linux/buffer_head.h 2005-03-28 21:06:08 -08:00
@@ -169,7 +169,7 @@
void __bforget(struct buffer_head *);
void __breadahead(struct block_device *, sector_t block, int size);
struct buffer_head *__bread(struct block_device *, sector_t block, int size);
-struct buffer_head *alloc_buffer_head(int gfp_flags);
+struct buffer_head *alloc_buffer_head(unsigned int __nocast gfp_flags);
void free_buffer_head(struct buffer_head * bh);
void FASTCALL(unlock_buffer(struct buffer_head *bh));
void FASTCALL(__lock_buffer(struct buffer_head *bh));
diff -Nru a/include/linux/gfp.h b/include/linux/gfp.h
--- a/include/linux/gfp.h 2005-03-28 21:06:08 -08:00
+++ b/include/linux/gfp.h 2005-03-28 21:06:08 -08:00
@@ -26,18 +26,18 @@
*
* __GFP_NORETRY: The VM implementation must not retry indefinitely.
*/
-#define __GFP_WAIT 0x10 /* Can wait and reschedule? */
-#define __GFP_HIGH 0x20 /* Should access emergency pools? */
-#define __GFP_IO 0x40 /* Can start physical IO? */
-#define __GFP_FS 0x80 /* Can call down to low-level FS? */
-#define __GFP_COLD 0x100 /* Cache-cold page required */
-#define __GFP_NOWARN 0x200 /* Suppress page allocation failure warning */
-#define __GFP_REPEAT 0x400 /* Retry the allocation. Might fail */
-#define __GFP_NOFAIL 0x800 /* Retry for ever. Cannot fail */
-#define __GFP_NORETRY 0x1000 /* Do not retry. Might fail */
-#define __GFP_NO_GROW 0x2000 /* Slab internal usage */
-#define __GFP_COMP 0x4000 /* Add compound page metadata */
-#define __GFP_ZERO 0x8000 /* Return zeroed page on success */
+#define __GFP_WAIT 0x10u /* Can wait and reschedule? */
+#define __GFP_HIGH 0x20u /* Should access emergency pools? */
+#define __GFP_IO 0x40u /* Can start physical IO? */
+#define __GFP_FS 0x80u /* Can call down to low-level FS? */
+#define __GFP_COLD 0x100u /* Cache-cold page required */
+#define __GFP_NOWARN 0x200u /* Suppress page allocation failure warning */
+#define __GFP_REPEAT 0x400u /* Retry the allocation. Might fail */
+#define __GFP_NOFAIL 0x800u /* Retry for ever. Cannot fail */
+#define __GFP_NORETRY 0x1000u /* Do not retry. Might fail */
+#define __GFP_NO_GROW 0x2000u /* Slab internal usage */
+#define __GFP_COMP 0x4000u /* Add compound page metadata */
+#define __GFP_ZERO 0x8000u /* Return zeroed page on success */
#define __GFP_BITS_SHIFT 16 /* Room for 16 __GFP_FOO bits */
#define __GFP_BITS_MASK ((1 << __GFP_BITS_SHIFT) - 1)
@@ -82,7 +82,7 @@
extern struct page *
FASTCALL(__alloc_pages(unsigned int, unsigned int, struct zonelist *));
-static inline struct page *alloc_pages_node(int nid, unsigned int gfp_mask,
+static inline struct page *alloc_pages_node(int nid, unsigned int __nocast gfp_mask,
unsigned int order)
{
if (unlikely(order >= MAX_ORDER))
@@ -93,17 +93,17 @@
}
#ifdef CONFIG_NUMA
-extern struct page *alloc_pages_current(unsigned gfp_mask, unsigned order);
+extern struct page *alloc_pages_current(unsigned int __nocast gfp_mask, unsigned order);
static inline struct page *
-alloc_pages(unsigned int gfp_mask, unsigned int order)
+alloc_pages(unsigned int __nocast gfp_mask, unsigned int order)
{
if (unlikely(order >= MAX_ORDER))
return NULL;
return alloc_pages_current(gfp_mask, order);
}
-extern struct page *alloc_page_vma(unsigned gfp_mask,
+extern struct page *alloc_page_vma(unsigned __nocast gfp_mask,
struct vm_area_struct *vma, unsigned long addr);
#else
#define alloc_pages(gfp_mask, order) \
@@ -112,8 +112,8 @@
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
-extern unsigned long FASTCALL(__get_free_pages(unsigned int gfp_mask, unsigned int order));
-extern unsigned long FASTCALL(get_zeroed_page(unsigned int gfp_mask));
+extern unsigned long FASTCALL(__get_free_pages(unsigned int __nocast gfp_mask, unsigned int order));
+extern unsigned long FASTCALL(get_zeroed_page(unsigned int __nocast gfp_mask));
#define __get_free_page(gfp_mask) \
__get_free_pages((gfp_mask),0)
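
An aside on the gfp.h hunk above: the new "u" suffixes are what keep
correct callers warning-free. A mask built from plain int literals,
e.g. (__GFP_WAIT | __GFP_FS), has type int, and its implicit conversion
to an unsigned int __nocast parameter would itself trip the sparse
check; with unsigned literals the combined mask already has the
annotated type. A small made-up example of a call that stays quiet:

    #include <linux/slab.h>

    static void *grab_buffer(void)
    {
            /* The mask below is unsigned int thanks to the "u"
             * suffixes, so it matches the __nocast flags parameter
             * exactly and sparse has nothing to complain about. */
            return kmalloc(128, __GFP_WAIT | __GFP_IO | __GFP_FS);
    }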
diff -Nru a/include/linux/jbd.h b/include/linux/jbd.h
--- a/include/linux/jbd.h 2005-03-28 21:06:08 -08:00
+++ b/include/linux/jbd.h 2005-03-28 21:06:08 -08:00
@@ -934,7 +934,7 @@
*/
extern kmem_cache_t *jbd_handle_cache;
-static inline handle_t *jbd_alloc_handle(int gfp_flags)
+static inline handle_t *jbd_alloc_handle(unsigned int __nocast gfp_flags)
{
return kmem_cache_alloc(jbd_handle_cache, gfp_flags);
}
diff -Nru a/include/linux/kfifo.h b/include/linux/kfifo.h
--- a/include/linux/kfifo.h 2005-03-28 21:06:08 -08:00
+++ b/include/linux/kfifo.h 2005-03-28 21:06:08 -08:00
@@ -35,8 +35,8 @@
};
extern struct kfifo *kfifo_init(unsigned char *buffer, unsigned int size,
- int gfp_mask, spinlock_t *lock);
-extern struct kfifo *kfifo_alloc(unsigned int size, int gfp_mask,
+ unsigned int __nocast gfp_mask, spinlock_t *lock);
+extern struct kfifo *kfifo_alloc(unsigned int size, unsigned int __nocast gfp_mask,
spinlock_t *lock);
extern void kfifo_free(struct kfifo *fifo);
extern unsigned int __kfifo_put(struct kfifo *fifo,
diff -Nru a/include/linux/mempool.h b/include/linux/mempool.h
--- a/include/linux/mempool.h 2005-03-28 21:06:08 -08:00
+++ b/include/linux/mempool.h 2005-03-28 21:06:08 -08:00
@@ -6,7 +6,7 @@
#include <linux/wait.h>
-typedef void * (mempool_alloc_t)(int gfp_mask, void *pool_data);
+typedef void * (mempool_alloc_t)(unsigned int __nocast gfp_mask, void *pool_data);
typedef void (mempool_free_t)(void *element, void *pool_data);
typedef struct mempool_s {
@@ -22,16 +22,16 @@
} mempool_t;
extern mempool_t * mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
mempool_free_t *free_fn, void *pool_data);
-extern int mempool_resize(mempool_t *pool, int new_min_nr, int gfp_mask);
+extern int mempool_resize(mempool_t *pool, int new_min_nr, unsigned int __nocast gfp_mask);
extern void mempool_destroy(mempool_t *pool);
-extern void * mempool_alloc(mempool_t *pool, int gfp_mask);
+extern void * mempool_alloc(mempool_t *pool, unsigned int __nocast gfp_mask);
extern void mempool_free(void *element, mempool_t *pool);
/*
* A mempool_alloc_t and mempool_free_t that get the memory from
* a slab that is passed in through pool_data.
*/
-void *mempool_alloc_slab(int gfp_mask, void *pool_data);
+void *mempool_alloc_slab(unsigned int __nocast gfp_mask, void *pool_data);
void mempool_free_slab(void *element, void *pool_data);
#endif /* _LINUX_MEMPOOL_H */
diff -Nru a/include/linux/pagemap.h b/include/linux/pagemap.h
--- a/include/linux/pagemap.h 2005-03-28 21:06:08 -08:00
+++ b/include/linux/pagemap.h 2005-03-28 21:06:08 -08:00
@@ -19,7 +19,7 @@
#define AS_EIO (__GFP_BITS_SHIFT + 0) /* IO error on async write */
#define AS_ENOSPC (__GFP_BITS_SHIFT + 1) /* ENOSPC on async write */
-static inline int mapping_gfp_mask(struct address_space * mapping)
+static inline unsigned int __nocast mapping_gfp_mask(struct address_space * mapping)
{
return mapping->flags & __GFP_BITS_MASK;
}
diff -Nru a/include/linux/posix_acl.h b/include/linux/posix_acl.h
--- a/include/linux/posix_acl.h 2005-03-28 21:06:08 -08:00
+++ b/include/linux/posix_acl.h 2005-03-28 21:06:08 -08:00
@@ -71,11 +71,11 @@
/* posix_acl.c */
-extern struct posix_acl *posix_acl_alloc(int, int);
-extern struct posix_acl *posix_acl_clone(const struct posix_acl *, int);
+extern struct posix_acl *posix_acl_alloc(int, unsigned int __nocast);
+extern struct posix_acl *posix_acl_clone(const struct posix_acl *, unsigned int __nocast);
extern int posix_acl_valid(const struct posix_acl *);
extern int posix_acl_permission(struct inode *, const struct posix_acl *, int);
-extern struct posix_acl *posix_acl_from_mode(mode_t, int);
+extern struct posix_acl *posix_acl_from_mode(mode_t, unsigned int __nocast);
extern int posix_acl_equiv_mode(const struct posix_acl *, mode_t *);
extern int posix_acl_create_masq(struct posix_acl *, mode_t *);
extern int posix_acl_chmod_masq(struct posix_acl *, mode_t);
diff -Nru a/include/linux/slab.h b/include/linux/slab.h
--- a/include/linux/slab.h 2005-03-28 21:06:08 -08:00
+++ b/include/linux/slab.h 2005-03-28 21:06:08 -08:00
@@ -61,7 +61,7 @@
void (*)(void *, kmem_cache_t *,
unsigned long));
extern int kmem_cache_destroy(kmem_cache_t *);
extern int kmem_cache_shrink(kmem_cache_t *);
-extern void *kmem_cache_alloc(kmem_cache_t *, int);
+extern void *kmem_cache_alloc(kmem_cache_t *, unsigned int __nocast);
#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node(kmem_cache_t *, int);
#else
@@ -80,9 +80,9 @@
kmem_cache_t *cs_dmacachep;
};
extern struct cache_sizes malloc_sizes[];
-extern void *__kmalloc(size_t, int);
+extern void *__kmalloc(size_t, unsigned int __nocast);
-static inline void *kmalloc(size_t size, int flags)
+static inline void *kmalloc(size_t size, unsigned int __nocast flags)
{
if (__builtin_constant_p(size)) {
int i = 0;
@@ -105,7 +105,7 @@
return __kmalloc(size, flags);
}
-extern void *kcalloc(size_t, size_t, int);
+extern void *kcalloc(size_t, size_t, unsigned int __nocast);
extern void kfree(const void *);
extern unsigned int ksize(const void *);
diff -Nru a/include/linux/swap.h b/include/linux/swap.h
--- a/include/linux/swap.h 2005-03-28 21:06:08 -08:00
+++ b/include/linux/swap.h 2005-03-28 21:06:08 -08:00
@@ -148,7 +148,7 @@
#define vm_swap_full() (nr_swap_pages*2 < total_swap_pages)
/* linux/mm/oom_kill.c */
-extern void out_of_memory(int gfp_mask);
+extern void out_of_memory(unsigned int __nocast gfp_mask);
/* linux/mm/memory.c */
extern void swapin_readahead(swp_entry_t, unsigned long, struct vm_area_struct *);
diff -Nru a/include/linux/vmalloc.h b/include/linux/vmalloc.h
--- a/include/linux/vmalloc.h 2005-03-28 21:06:08 -08:00
+++ b/include/linux/vmalloc.h 2005-03-28 21:06:08 -08:00
@@ -26,8 +26,8 @@
extern void *vmalloc(unsigned long size);
extern void *vmalloc_exec(unsigned long size);
extern void *vmalloc_32(unsigned long size);
-extern void *__vmalloc(unsigned long size, int gfp_mask, pgprot_t prot);
-extern void *__vmalloc_area(struct vm_struct *area, int gfp_mask, pgprot_t prot);
+extern void *__vmalloc(unsigned long size, unsigned int __nocast gfp_mask, pgprot_t prot);
+extern void *__vmalloc_area(struct vm_struct *area, unsigned int __nocast gfp_mask, pgprot_t prot);
extern void vfree(void *addr);
extern void *vmap(struct page **pages, unsigned int count,
diff -Nru a/kernel/kfifo.c b/kernel/kfifo.c
--- a/kernel/kfifo.c 2005-03-28 21:06:08 -08:00
+++ b/kernel/kfifo.c 2005-03-28 21:06:08 -08:00
@@ -36,7 +36,7 @@
* struct kfifo with kfree().
*/
struct kfifo *kfifo_init(unsigned char *buffer, unsigned int size,
- int gfp_mask, spinlock_t *lock)
+ unsigned int __nocast gfp_mask, spinlock_t *lock)
{
struct kfifo *fifo;
@@ -64,7 +64,7 @@
*
* The size will be rounded-up to a power of 2.
*/
-struct kfifo *kfifo_alloc(unsigned int size, int gfp_mask, spinlock_t *lock)
+struct kfifo *kfifo_alloc(unsigned int size, unsigned int __nocast gfp_mask, spinlock_t *lock)
{
unsigned char *buffer;
struct kfifo *ret;
diff -Nru a/kernel/signal.c b/kernel/signal.c
--- a/kernel/signal.c 2005-03-28 21:06:08 -08:00
+++ b/kernel/signal.c 2005-03-28 21:06:08 -08:00
@@ -259,7 +259,7 @@
return sig;
}
-static struct sigqueue *__sigqueue_alloc(struct task_struct *t, int flags,
+static struct sigqueue *__sigqueue_alloc(struct task_struct *t, unsigned int __nocast flags,
int override_rlimit)
{
struct sigqueue *q = NULL;
diff -Nru a/mm/filemap.c b/mm/filemap.c
--- a/mm/filemap.c 2005-03-28 21:06:08 -08:00
+++ b/mm/filemap.c 2005-03-28 21:06:08 -08:00
@@ -666,7 +666,7 @@
grab_cache_page_nowait(struct address_space *mapping, unsigned long index)
{
struct page *page = find_get_page(mapping, index);
- int gfp_mask;
+ unsigned int gfp_mask;
if (page) {
if (!TestSetPageLocked(page))
diff -Nru a/mm/highmem.c b/mm/highmem.c
--- a/mm/highmem.c 2005-03-28 21:06:08 -08:00
+++ b/mm/highmem.c 2005-03-28 21:06:08 -08:00
@@ -30,9 +30,9 @@
static mempool_t *page_pool, *isa_page_pool;
-static void *page_pool_alloc(int gfp_mask, void *data)
+static void *page_pool_alloc(unsigned int __nocast gfp_mask, void *data)
{
- int gfp = gfp_mask | (int) (long) data;
+ unsigned int gfp = gfp_mask | (unsigned int) (long) data;
return alloc_page(gfp);
}
diff -Nru a/mm/mempolicy.c b/mm/mempolicy.c
--- a/mm/mempolicy.c 2005-03-28 21:06:08 -08:00
+++ b/mm/mempolicy.c 2005-03-28 21:06:08 -08:00
@@ -648,7 +648,7 @@
}
/* Return a zonelist representing a mempolicy */
-static struct zonelist *zonelist_policy(unsigned gfp, struct mempolicy *policy)
+static struct zonelist *zonelist_policy(unsigned int __nocast gfp, struct mempolicy *policy)
{
int nd;
@@ -712,7 +712,7 @@
/* Allocate a page in interleaved policy.
Own path because it needs to do special accounting. */
-static struct page *alloc_page_interleave(unsigned gfp, unsigned order, unsigned nid)
+static struct page *alloc_page_interleave(unsigned int __nocast gfp, unsigned order, unsigned nid)
{
struct zonelist *zl;
struct page *page;
@@ -750,7 +750,7 @@
* Should be called with the mm_sem of the vma hold.
*/
struct page *
-alloc_page_vma(unsigned gfp, struct vm_area_struct *vma, unsigned long addr)
+alloc_page_vma(unsigned int __nocast gfp, struct vm_area_struct *vma, unsigned long addr)
{
struct mempolicy *pol = get_vma_policy(vma, addr);
@@ -789,7 +789,7 @@
* interrupt context and apply the current process NUMA policy.
* Returns NULL when no page can be allocated.
*/
-struct page *alloc_pages_current(unsigned gfp, unsigned order)
+struct page *alloc_pages_current(unsigned int __nocast gfp, unsigned order)
{
struct mempolicy *pol = current->mempolicy;
diff -Nru a/mm/mempool.c b/mm/mempool.c
--- a/mm/mempool.c 2005-03-28 21:06:08 -08:00
+++ b/mm/mempool.c 2005-03-28 21:06:08 -08:00
@@ -105,7 +105,7 @@
* while this function is running. mempool_alloc() & mempool_free()
* might be called (eg. from IRQ contexts) while this function executes.
*/
-int mempool_resize(mempool_t *pool, int new_min_nr, int gfp_mask)
+int mempool_resize(mempool_t *pool, int new_min_nr, unsigned int __nocast gfp_mask)
{
void *element;
void **new_elements;
@@ -193,7 +193,7 @@
* *never* fails when called from process contexts. (it might
* fail if called from an IRQ context.)
*/
-void * mempool_alloc(mempool_t *pool, int gfp_mask)
+void * mempool_alloc(mempool_t *pool, unsigned int __nocast gfp_mask)
{
void *element;
unsigned long flags;
@@ -275,7 +275,7 @@
/*
* A commonly used alloc and free fn.
*/
-void *mempool_alloc_slab(int gfp_mask, void *pool_data)
+void *mempool_alloc_slab(unsigned int __nocast gfp_mask, void *pool_data)
{
kmem_cache_t *mem = (kmem_cache_t *) pool_data;
return kmem_cache_alloc(mem, gfp_mask);
diff -Nru a/mm/oom_kill.c b/mm/oom_kill.c
--- a/mm/oom_kill.c 2005-03-28 21:06:08 -08:00
+++ b/mm/oom_kill.c 2005-03-28 21:06:08 -08:00
@@ -253,7 +253,7 @@
* OR try to be smart about which process to kill. Note that we
* don't have to be perfect here, we just have to be good.
*/
-void out_of_memory(int gfp_mask)
+void out_of_memory(unsigned int __nocast gfp_mask)
{
struct mm_struct *mm = NULL;
task_t * p;
diff -Nru a/mm/page_alloc.c b/mm/page_alloc.c
--- a/mm/page_alloc.c 2005-03-28 21:06:08 -08:00
+++ b/mm/page_alloc.c 2005-03-28 21:06:08 -08:00
@@ -633,7 +633,7 @@
free_hot_cold_page(page, 1);
}
-static inline void prep_zero_page(struct page *page, int order, int gfp_flags)
+static inline void prep_zero_page(struct page *page, int order, unsigned int __nocast gfp_flags)
{
int i;
@@ -648,7 +648,7 @@
* or two.
*/
static struct page *
-buffered_rmqueue(struct zone *zone, int order, int gfp_flags)
+buffered_rmqueue(struct zone *zone, int order, unsigned int __nocast gfp_flags)
{
unsigned long flags;
struct page *page = NULL;
@@ -726,7 +726,7 @@
* This is the 'heart' of the zoned buddy allocator.
*/
struct page * fastcall
-__alloc_pages(unsigned int gfp_mask, unsigned int order,
+__alloc_pages(unsigned int __nocast gfp_mask, unsigned int order,
struct zonelist *zonelist)
{
const int wait = gfp_mask & __GFP_WAIT;
@@ -908,7 +908,7 @@
/*
* Common helper functions.
*/
-fastcall unsigned long __get_free_pages(unsigned int gfp_mask, unsigned int order)
+fastcall unsigned long __get_free_pages(unsigned int __nocast gfp_mask, unsigned int order)
{
struct page * page;
page = alloc_pages(gfp_mask, order);
@@ -919,7 +919,7 @@
EXPORT_SYMBOL(__get_free_pages);
-fastcall unsigned long get_zeroed_page(unsigned int gfp_mask)
+fastcall unsigned long get_zeroed_page(unsigned int __nocast gfp_mask)
{
struct page * page;
diff -Nru a/mm/page_io.c b/mm/page_io.c
--- a/mm/page_io.c 2005-03-28 21:06:08 -08:00
+++ b/mm/page_io.c 2005-03-28 21:06:08 -08:00
@@ -19,7 +19,7 @@
#include <linux/writeback.h>
#include <asm/pgtable.h>
-static struct bio *get_swap_bio(int gfp_flags, pgoff_t index,
+static struct bio *get_swap_bio(unsigned int __nocast gfp_flags, pgoff_t index,
struct page *page, bio_end_io_t end_io)
{
struct bio *bio;
diff -Nru a/mm/shmem.c b/mm/shmem.c
--- a/mm/shmem.c 2005-03-28 21:06:08 -08:00
+++ b/mm/shmem.c 2005-03-28 21:06:08 -08:00
@@ -922,7 +922,7 @@
}
static inline struct page *
-shmem_alloc_page(unsigned long gfp,struct shmem_inode_info *info,
+shmem_alloc_page(unsigned int __nocast gfp,struct shmem_inode_info *info,
unsigned long idx)
{
return alloc_page(gfp | __GFP_ZERO);
diff -Nru a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c 2005-03-28 21:06:08 -08:00
+++ b/mm/slab.c 2005-03-28 21:06:08 -08:00
@@ -888,7 +888,7 @@
* did not request dmaable memory, we might get it, but that
* would be relatively rare and ignorable.
*/
-static void *kmem_getpages(kmem_cache_t *cachep, int flags, int nodeid)
+static void *kmem_getpages(kmem_cache_t *cachep, unsigned int __nocast flags, int nodeid)
{
struct page *page;
void *addr;
@@ -1678,7 +1678,7 @@
/* Get the memory for a slab management obj. */
static struct slab* alloc_slabmgmt (kmem_cache_t *cachep,
- void *objp, int colour_off, int local_flags)
+ void *objp, int colour_off, unsigned int __nocast local_flags)
{
struct slab *slabp;
@@ -1749,7 +1749,7 @@
slabp->free = 0;
}
-static void kmem_flagcheck(kmem_cache_t *cachep, int flags)
+static void kmem_flagcheck(kmem_cache_t *cachep, unsigned int flags)
{
if (flags & SLAB_DMA) {
if (!(cachep->gfpflags & GFP_DMA))
@@ -1779,12 +1779,12 @@
* Grow (by 1) the number of slabs within a cache. This is called by
* kmem_cache_alloc() when there are no active objs left in a cache.
*/
-static int cache_grow (kmem_cache_t * cachep, int flags, int nodeid)
+static int cache_grow (kmem_cache_t * cachep, unsigned int __nocast flags, int nodeid)
{
struct slab *slabp;
void *objp;
size_t offset;
- int local_flags;
+ unsigned int local_flags;
unsigned long ctor_flags;
/* Be lazy and only check for valid flags here,
@@ -1982,7 +1982,7 @@
#define check_slabp(x,y) do { } while(0)
#endif
-static void* cache_alloc_refill(kmem_cache_t* cachep, int flags)
+static void* cache_alloc_refill(kmem_cache_t* cachep, unsigned int __nocast flags)
{
int batchcount;
struct kmem_list3 *l3;
@@ -2079,7 +2079,7 @@
}
static inline void
-cache_alloc_debugcheck_before(kmem_cache_t *cachep, int flags)
+cache_alloc_debugcheck_before(kmem_cache_t *cachep, unsigned int __nocast flags)
{
might_sleep_if(flags & __GFP_WAIT);
#if DEBUG
@@ -2134,7 +2134,7 @@
#endif
-static inline void * __cache_alloc (kmem_cache_t *cachep, int flags)
+static inline void * __cache_alloc (kmem_cache_t *cachep, unsigned int __nocast flags)
{
unsigned long save_flags;
void* objp;
@@ -2296,7 +2296,7 @@
* Allocate an object from this cache. The flags are only relevant
* if the cache has no available objects.
*/
-void * kmem_cache_alloc (kmem_cache_t *cachep, int flags)
+void * kmem_cache_alloc (kmem_cache_t *cachep, unsigned int __nocast flags)
{
return __cache_alloc(cachep, flags);
}
@@ -2453,7 +2453,7 @@
* platforms. For example, on i386, it means that the memory must come
* from the first 16MB.
*/
-void * __kmalloc (size_t size, int flags)
+void * __kmalloc (size_t size, unsigned int __nocast flags)
{
struct cache_sizes *csizep = malloc_sizes;
@@ -2545,7 +2545,7 @@
* @size: element size.
* @flags: the type of memory to allocate.
*/
-void *kcalloc(size_t n, size_t size, int flags)
+void *kcalloc(size_t n, size_t size, unsigned int __nocast flags)
{
void *ret = NULL;
diff -Nru a/mm/vmalloc.c b/mm/vmalloc.c
--- a/mm/vmalloc.c 2005-03-28 21:06:08 -08:00
+++ b/mm/vmalloc.c 2005-03-28 21:06:08 -08:00
@@ -389,7 +389,7 @@
EXPORT_SYMBOL(vmap);
-void *__vmalloc_area(struct vm_struct *area, int gfp_mask, pgprot_t prot)
+void *__vmalloc_area(struct vm_struct *area, unsigned int __nocast gfp_mask, pgprot_t prot)
{
struct page **pages;
unsigned int nr_pages, array_size, i;
@@ -440,7 +440,7 @@
* allocator with @gfp_mask flags. Map them into contiguous
* kernel virtual space, using a pagetable protection of @prot.
*/
-void *__vmalloc(unsigned long size, int gfp_mask, pgprot_t prot)
+void *__vmalloc(unsigned long size, unsigned int __nocast gfp_mask, pgprot_t prot)
{
struct vm_struct *area;