ChangeSet 1.2234, 2005/03/28 20:43:20-08:00, [EMAIL PROTECTED]
Merge whitespace and __nocast changes
drivers/block/pktcdvd.c | 4 ++--
drivers/s390/scsi/zfcp_aux.c | 2 +-
fs/mpage.c | 3 ++-
include/linux/jbd.h | 2 +-
kernel/signal.c | 2 +-
mm/filemap.c | 2 +-
mm/mempolicy.c | 8 ++++----
mm/slab.c | 22 +++++++++++-----------
8 files changed, 23 insertions(+), 22 deletions(-)
diff -Nru a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
--- a/drivers/block/pktcdvd.c 2005-03-28 22:03:08 -08:00
+++ b/drivers/block/pktcdvd.c 2005-03-28 22:03:08 -08:00
@@ -219,7 +219,7 @@
return 1;
}
-static void *pkt_rb_alloc(int gfp_mask, void *data)
+static void *pkt_rb_alloc(unsigned int __nocast gfp_mask, void *data)
{
return kmalloc(sizeof(struct pkt_rb_node), gfp_mask);
}
@@ -2059,7 +2059,7 @@
}
-static void *psd_pool_alloc(int gfp_mask, void *data)
+static void *psd_pool_alloc(unsigned int __nocast gfp_mask, void *data)
{
return kmalloc(sizeof(struct packet_stacked_data), gfp_mask);
}
diff -Nru a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
--- a/drivers/s390/scsi/zfcp_aux.c 2005-03-28 22:03:08 -08:00
+++ b/drivers/s390/scsi/zfcp_aux.c 2005-03-28 22:03:08 -08:00
@@ -928,7 +928,7 @@
}
static void *
-zfcp_mempool_alloc(int gfp_mask, void *size)
+zfcp_mempool_alloc(unsigned int __nocast gfp_mask, void *size)
{
return kmalloc((size_t) size, gfp_mask);
}
diff -Nru a/fs/mpage.c b/fs/mpage.c
--- a/fs/mpage.c 2005-03-28 22:03:08 -08:00
+++ b/fs/mpage.c 2005-03-28 22:03:08 -08:00
@@ -98,7 +98,8 @@
static struct bio *
mpage_alloc(struct block_device *bdev,
- sector_t first_sector, int nr_vecs, int gfp_flags)
+ sector_t first_sector, int nr_vecs,
+ unsigned int __nocast gfp_flags)
{
struct bio *bio;
diff -Nru a/include/linux/jbd.h b/include/linux/jbd.h
--- a/include/linux/jbd.h 2005-03-28 22:03:08 -08:00
+++ b/include/linux/jbd.h 2005-03-28 22:03:08 -08:00
@@ -935,7 +935,7 @@
*/
extern kmem_cache_t *jbd_handle_cache;
-static inline handle_t *jbd_alloc_handle(int gfp_flags)
+static inline handle_t *jbd_alloc_handle(unsigned int __nocast gfp_flags)
{
return kmem_cache_alloc(jbd_handle_cache, gfp_flags);
}
diff -Nru a/kernel/signal.c b/kernel/signal.c
--- a/kernel/signal.c 2005-03-28 22:03:08 -08:00
+++ b/kernel/signal.c 2005-03-28 22:03:08 -08:00
@@ -259,7 +259,7 @@
return sig;
}
-static struct sigqueue *__sigqueue_alloc(struct task_struct *t, int flags,
+static struct sigqueue *__sigqueue_alloc(struct task_struct *t, unsigned int __nocast flags,
int override_rlimit)
{
struct sigqueue *q = NULL;
diff -Nru a/mm/filemap.c b/mm/filemap.c
--- a/mm/filemap.c 2005-03-28 22:03:08 -08:00
+++ b/mm/filemap.c 2005-03-28 22:03:08 -08:00
@@ -666,7 +666,7 @@
grab_cache_page_nowait(struct address_space *mapping, unsigned long index)
{
struct page *page = find_get_page(mapping, index);
- int gfp_mask;
+ unsigned int gfp_mask;
if (page) {
if (!TestSetPageLocked(page))
diff -Nru a/mm/mempolicy.c b/mm/mempolicy.c
--- a/mm/mempolicy.c 2005-03-28 22:03:08 -08:00
+++ b/mm/mempolicy.c 2005-03-28 22:03:08 -08:00
@@ -648,7 +648,7 @@
}
/* Return a zonelist representing a mempolicy */
-static struct zonelist *zonelist_policy(unsigned gfp, struct mempolicy *policy)
+static struct zonelist *zonelist_policy(unsigned int __nocast gfp, struct mempolicy *policy)
{
int nd;
@@ -712,7 +712,7 @@
/* Allocate a page in interleaved policy.
Own path because it needs to do special accounting. */
-static struct page *alloc_page_interleave(unsigned gfp, unsigned order, unsigned nid)
+static struct page *alloc_page_interleave(unsigned int __nocast gfp, unsigned order, unsigned nid)
{
struct zonelist *zl;
struct page *page;
@@ -750,7 +750,7 @@
* Should be called with the mm_sem of the vma hold.
*/
struct page *
-alloc_page_vma(unsigned gfp, struct vm_area_struct *vma, unsigned long addr)
+alloc_page_vma(unsigned int __nocast gfp, struct vm_area_struct *vma, unsigned
long addr)
{
struct mempolicy *pol = get_vma_policy(vma, addr);
@@ -793,7 +793,7 @@
* 1) it's ok to take cpuset_sem (can WAIT), and
* 2) allocating for current task (not interrupt).
*/
-struct page *alloc_pages_current(unsigned gfp, unsigned order)
+struct page *alloc_pages_current(unsigned int __nocast gfp, unsigned order)
{
struct mempolicy *pol = current->mempolicy;
diff -Nru a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c 2005-03-28 22:03:08 -08:00
+++ b/mm/slab.c 2005-03-28 22:03:08 -08:00
@@ -895,7 +895,7 @@
* did not request dmaable memory, we might get it, but that
* would be relatively rare and ignorable.
*/
-static void *kmem_getpages(kmem_cache_t *cachep, int flags, int nodeid)
+static void *kmem_getpages(kmem_cache_t *cachep, unsigned int __nocast flags, int nodeid)
{
struct page *page;
void *addr;
@@ -1684,7 +1684,7 @@
/* Get the memory for a slab management obj. */
static struct slab* alloc_slabmgmt(kmem_cache_t *cachep,
- void *objp, int colour_off, int local_flags)
+ void *objp, int colour_off, unsigned int __nocast local_flags)
{
struct slab *slabp;
@@ -1755,7 +1755,7 @@
slabp->free = 0;
}
-static void kmem_flagcheck(kmem_cache_t *cachep, int flags)
+static void kmem_flagcheck(kmem_cache_t *cachep, unsigned int flags)
{
if (flags & SLAB_DMA) {
if (!(cachep->gfpflags & GFP_DMA))
@@ -1785,12 +1785,12 @@
* Grow (by 1) the number of slabs within a cache. This is called by
* kmem_cache_alloc() when there are no active objs left in a cache.
*/
-static int cache_grow(kmem_cache_t *cachep, int flags, int nodeid)
+static int cache_grow(kmem_cache_t *cachep, unsigned int __nocast flags, int nodeid)
{
struct slab *slabp;
void *objp;
size_t offset;
- int local_flags;
+ unsigned int local_flags;
unsigned long ctor_flags;
/* Be lazy and only check for valid flags here,
@@ -1988,7 +1988,7 @@
#define check_slabp(x,y) do { } while(0)
#endif
-static void *cache_alloc_refill(kmem_cache_t *cachep, int flags)
+static void *cache_alloc_refill(kmem_cache_t *cachep, unsigned int __nocast flags)
{
int batchcount;
struct kmem_list3 *l3;
@@ -2085,7 +2085,7 @@
}
static inline void
-cache_alloc_debugcheck_before(kmem_cache_t *cachep, int flags)
+cache_alloc_debugcheck_before(kmem_cache_t *cachep, unsigned int __nocast flags)
{
might_sleep_if(flags & __GFP_WAIT);
#if DEBUG
@@ -2140,7 +2140,7 @@
#endif
-static inline void *__cache_alloc(kmem_cache_t *cachep, int flags)
+static inline void *__cache_alloc(kmem_cache_t *cachep, unsigned int __nocast flags)
{
unsigned long save_flags;
void* objp;
@@ -2302,7 +2302,7 @@
* Allocate an object from this cache. The flags are only relevant
* if the cache has no available objects.
*/
-void *kmem_cache_alloc(kmem_cache_t *cachep, int flags)
+void *kmem_cache_alloc(kmem_cache_t *cachep, unsigned int __nocast flags)
{
return __cache_alloc(cachep, flags);
}
@@ -2458,7 +2458,7 @@
* platforms. For example, on i386, it means that the memory must come
* from the first 16MB.
*/
-void *__kmalloc(size_t size, int flags)
+void *__kmalloc(size_t size, unsigned int __nocast flags)
{
kmem_cache_t *cachep;
@@ -2537,7 +2537,7 @@
* @size: element size.
* @flags: the type of memory to allocate.
*/
-void *kcalloc(size_t n, size_t size, int flags)
+void *kcalloc(size_t n, size_t size, unsigned int __nocast flags)
{
void *ret = NULL;
-
To unsubscribe from this list: send the line "unsubscribe bk-commits-head" in
the body of a message to [EMAIL PROTECTED]
More majordomo info at http://vger.kernel.org/majordomo-info.html