Make ->hash_count, ->low_watermark and ->high_watermark unsigned int
and propagate unsignedness to other variables.

This change doesn't alter code generation because these fields aren't
used in 64-bit contexts, but make the change anyway: these fields can
never hold negative values.

Signed-off-by: Alexey Dobriyan <adobri...@gmail.com>
---

 include/net/flowcache.h |    6 +++---
 net/core/flow.c         |   13 +++++++------
 2 files changed, 10 insertions(+), 9 deletions(-)

--- a/include/net/flowcache.h
+++ b/include/net/flowcache.h
@@ -8,7 +8,7 @@
 
 struct flow_cache_percpu {
        struct hlist_head               *hash_table;
-       int                             hash_count;
+       unsigned int                    hash_count;
        u32                             hash_rnd;
        int                             hash_rnd_recalc;
        struct tasklet_struct           flush_tasklet;
@@ -18,8 +18,8 @@ struct flow_cache {
        u32                             hash_shift;
        struct flow_cache_percpu __percpu *percpu;
        struct hlist_node               node;
-       int                             low_watermark;
-       int                             high_watermark;
+       unsigned int                    low_watermark;
+       unsigned int                    high_watermark;
        struct timer_list               rnd_timer;
 };
 #endif /* _NET_FLOWCACHE_H */
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -99,7 +99,8 @@ static void flow_cache_gc_task(struct work_struct *work)
 }
 
 static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp,
-                                    int deleted, struct list_head *gc_list,
+                                    unsigned int deleted,
+                                    struct list_head *gc_list,
                                     struct netns_xfrm *xfrm)
 {
        if (deleted) {
@@ -114,18 +115,18 @@ static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp,
 
 static void __flow_cache_shrink(struct flow_cache *fc,
                                struct flow_cache_percpu *fcp,
-                               int shrink_to)
+                               unsigned int shrink_to)
 {
        struct flow_cache_entry *fle;
        struct hlist_node *tmp;
        LIST_HEAD(gc_list);
-       int deleted = 0;
+       unsigned int deleted = 0;
        struct netns_xfrm *xfrm = container_of(fc, struct netns_xfrm,
                                                flow_cache_global);
        unsigned int i;
 
        for (i = 0; i < flow_cache_hash_size(fc); i++) {
-               int saved = 0;
+               unsigned int saved = 0;
 
                hlist_for_each_entry_safe(fle, tmp,
                                          &fcp->hash_table[i], u.hlist) {
@@ -146,7 +147,7 @@ static void __flow_cache_shrink(struct flow_cache *fc,
 static void flow_cache_shrink(struct flow_cache *fc,
                              struct flow_cache_percpu *fcp)
 {
-       int shrink_to = fc->low_watermark / flow_cache_hash_size(fc);
+       unsigned int shrink_to = fc->low_watermark / flow_cache_hash_size(fc);
 
        __flow_cache_shrink(fc, fcp, shrink_to);
 }
@@ -296,7 +297,7 @@ static void flow_cache_flush_tasklet(unsigned long data)
        struct flow_cache_entry *fle;
        struct hlist_node *tmp;
        LIST_HEAD(gc_list);
-       int deleted = 0;
+       unsigned int deleted = 0;
        struct netns_xfrm *xfrm = container_of(fc, struct netns_xfrm,
                                                flow_cache_global);
        unsigned int i;

Reply via email to