Re: [RFC] ipvs: Cleanup sync daemon code

2008-02-10 Thread Sven Wegener

On Sat, 9 Feb 2008, Christoph Hellwig wrote:


On Sun, Feb 10, 2008 at 12:38:11AM +0100, Sven Wegener wrote:

 struct ip_vs_sync_thread_data {
-   struct completion *startup;
+   struct completion *startup; /* set to NULL once completed */


This is not needed anymore.  kthread_run guarantees that the newly
created thread is run before returning to the caller.


The completion is currently used to return an error code for errors that 
happen during initialization in the threads (open socket, allocate 
memory). We could move the setup code out of the threads and have them 
only run an error-safe loop.



+/* wait queue for master sync daemon */
+static DECLARE_WAIT_QUEUE_HEAD(sync_master_wait);


I don't think you need this one either.  You can use wake_up_process
on the task_struct pointer instead.


Thanks, now using schedule_timeout with wake_up_process.


spin_lock(&ip_vs_sync_lock);
list_add_tail(&sb->list, &ip_vs_sync_queue);
+   if (++ip_vs_sync_count == 10)
+   wake_up_interruptible(&sync_master_wait);
spin_unlock(&ip_vs_sync_lock);
 }



-static int sync_thread(void *startup)
+static int sync_thread(void *data)


Btw, it might make sense to remove sync_thread and just call the
master and backup threads directly.


When the setup code has been moved out of the threads, the code gets much 
simpler.



+void __init ip_vs_sync_init(void)
+{
+   /* set up multicast address */
+   mcast_addr.sin_family = AF_INET;
+   mcast_addr.sin_port = htons(IP_VS_SYNC_PORT);
+   mcast_addr.sin_addr.s_addr = htonl(IP_VS_SYNC_GROUP);
 }


Why can't this be initialized at compile time by:

static struct sockaddr_in mcast_addr = {
.sin_family = AF_INET,
.sin_port   = htons(IP_VS_SYNC_PORT),
.sin_addr.s_addr= htonl(IP_VS_SYNC_GROUP),
}

(the hton* might need __constant_hton* also I'm not sure without trying)


Thanks.
--
To unsubscribe from this list: send the line unsubscribe netdev in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[RFC] ipvs: Cleanup sync daemon code

2008-02-09 Thread Sven Wegener

Hi all,

I'd like to get your feedback on this:

- Use kthread_run instead of doing a double-fork via kernel_thread()

- Return proper error codes to user-space on failures

Currently ipvsadm --start-daemon with an invalid --mcast-interface will 
silently succeed. With these changes we get an appropriate "No such device" 
error.


- Use wait queues for both master and backup thread

Instead of doing an endless loop with sleeping for one second, we now use 
wait queues. The master sync daemon has its own wait queue and gets woken 
up when we have enough data to send and also at a regular interval. The 
backup sync daemon sits on the wait queue of the mcast socket and gets 
woken up as soon as we have data to process.


diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 56f3c94..519bd96 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -890,6 +890,7 @@ extern char ip_vs_backup_mcast_ifn[IP_VS_IFNAME_MAXLEN];
 extern int start_sync_thread(int state, char *mcast_ifn, __u8 syncid);
 extern int stop_sync_thread(int state);
 extern void ip_vs_sync_conn(struct ip_vs_conn *cp);
+extern void ip_vs_sync_init(void);


 /*
diff --git a/net/ipv4/ipvs/ip_vs_core.c b/net/ipv4/ipvs/ip_vs_core.c
index 963981a..0ccee4b 100644
--- a/net/ipv4/ipvs/ip_vs_core.c
+++ b/net/ipv4/ipvs/ip_vs_core.c
@@ -1071,6 +1071,8 @@ static int __init ip_vs_init(void)
 {
int ret;

+   ip_vs_sync_init();
+
	ret = ip_vs_control_init();
	if (ret < 0) {
		IP_VS_ERR("can't setup control.\n");
diff --git a/net/ipv4/ipvs/ip_vs_sync.c b/net/ipv4/ipvs/ip_vs_sync.c
index 948378d..36063d3 100644
--- a/net/ipv4/ipvs/ip_vs_sync.c
+++ b/net/ipv4/ipvs/ip_vs_sync.c
@@ -29,6 +29,9 @@
 #include <linux/in.h>
 #include <linux/igmp.h> /* for ip_mc_join_group */
 #include <linux/udp.h>
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/err.h>

 #include <net/ip.h>
 #include <net/sock.h>
@@ -68,7 +71,8 @@ struct ip_vs_sync_conn_options {
 };

 struct ip_vs_sync_thread_data {
-   struct completion *startup;
+   struct completion *startup; /* set to NULL once completed */
+   int *retval; /* only valid until startup is completed */
int state;
 };

@@ -123,9 +127,10 @@ struct ip_vs_sync_buff {
 };


-/* the sync_buff list head and the lock */
+/* the sync_buff list head, the lock and the counter */
 static LIST_HEAD(ip_vs_sync_queue);
 static DEFINE_SPINLOCK(ip_vs_sync_lock);
+static unsigned int ip_vs_sync_count;

 /* current sync_buff for accepting new conn entries */
 static struct ip_vs_sync_buff   *curr_sb = NULL;
@@ -140,6 +145,13 @@ volatile int ip_vs_backup_syncid = 0;
 char ip_vs_master_mcast_ifn[IP_VS_IFNAME_MAXLEN];
 char ip_vs_backup_mcast_ifn[IP_VS_IFNAME_MAXLEN];

+/* sync daemon tasks */
+static struct task_struct *sync_master_thread;
+static struct task_struct *sync_backup_thread;
+
+/* wait queue for master sync daemon */
+static DECLARE_WAIT_QUEUE_HEAD(sync_master_wait);
+
 /* multicast addr */
 static struct sockaddr_in mcast_addr;

@@ -148,6 +160,8 @@ static inline void sb_queue_tail(struct ip_vs_sync_buff *sb)
 {
	spin_lock(&ip_vs_sync_lock);
	list_add_tail(&sb->list, &ip_vs_sync_queue);
+	if (++ip_vs_sync_count == 10)
+		wake_up_interruptible(&sync_master_wait);
	spin_unlock(&ip_vs_sync_lock);
 }

@@ -163,6 +177,7 @@ static inline struct ip_vs_sync_buff * sb_dequeue(void)
struct ip_vs_sync_buff,
list);
		list_del(&sb->list);
+		ip_vs_sync_count--;
	}
	spin_unlock_bh(&ip_vs_sync_lock);

@@ -536,14 +551,17 @@ static int bind_mcastif_addr(struct socket *sock, char 
*ifname)
 static struct socket * make_send_sock(void)
 {
struct socket *sock;
+   int result;

/* First create a socket */
-   if (sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock) < 0) {
+   result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
+   if (result < 0) {
		IP_VS_ERR("Error during creation of socket; terminating\n");
-		return NULL;
+		return ERR_PTR(result);
	}

-   if (set_mcast_if(sock->sk, ip_vs_master_mcast_ifn) < 0) {
+   result = set_mcast_if(sock->sk, ip_vs_master_mcast_ifn);
+   if (result < 0) {
		IP_VS_ERR("Error setting outbound mcast interface\n");
		goto error;
	}
@@ -551,14 +569,16 @@ static struct socket * make_send_sock(void)
	set_mcast_loop(sock->sk, 0);
	set_mcast_ttl(sock->sk, 1);

-   if (bind_mcastif_addr(sock, ip_vs_master_mcast_ifn) < 0) {
+   result = bind_mcastif_addr(sock, ip_vs_master_mcast_ifn);
+   if (result < 0) {
		IP_VS_ERR("Error binding address of the mcast interface\n");
		goto error;
	}

-   if (sock->ops->connect(sock,
-		       (struct sockaddr *) &mcast_addr,
-  sizeof(struct 

[PATCH] ipvs: Make the synchronization interval controllable

2008-02-06 Thread Sven Wegener
The default synchronization interval of 1000 milliseconds is too high for a
heavily loaded director. Collecting the connection information from one second
and then sending it out in a burst will overflow the socket buffer and lead to
synchronization information being dropped. Make the interval controllable by a
sysctl variable so that users can tune it.

Signed-off-by: Sven Wegener [EMAIL PROTECTED]
---
 Documentation/networking/ipvs-sysctl.txt |9 +
 net/ipv4/ipvs/ip_vs_ctl.c|9 -
 net/ipv4/ipvs/ip_vs_sync.c   |6 --
 3 files changed, 21 insertions(+), 3 deletions(-)

diff --git a/Documentation/networking/ipvs-sysctl.txt 
b/Documentation/networking/ipvs-sysctl.txt
index 4ccdbca..1389e2f 100644
--- a/Documentation/networking/ipvs-sysctl.txt
+++ b/Documentation/networking/ipvs-sysctl.txt
@@ -141,3 +141,12 @@ sync_threshold - INTEGER
 synchronized, every time the number of its incoming packets
 modulus 50 equals the threshold. The range of the threshold is
 from 0 to 49.
+
+sync_interval - INTEGER
+   default 1000
+
+   The information from synchronization is buffered and sent out at
+   regular intervals by a kernel thread. The interval (in ms) is
+   controlled by this value. The default is too high for a heavily loaded
+   director. If you get a lot of ip_vs_send_async error messages from
+   your kernel, then you should lower this value.
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c
index 94c5767..2781505 100644
--- a/net/ipv4/ipvs/ip_vs_ctl.c
+++ b/net/ipv4/ipvs/ip_vs_ctl.c
@@ -81,7 +81,7 @@ int sysctl_ip_vs_expire_nodest_conn = 0;
 int sysctl_ip_vs_expire_quiescent_template = 0;
 int sysctl_ip_vs_sync_threshold[2] = { 3, 50 };
 int sysctl_ip_vs_nat_icmp_send = 0;
-
+extern int sysctl_ip_vs_sync_interval;
 
 #ifdef CONFIG_IP_VS_DEBUG
 static int sysctl_ip_vs_debug_level = 0;
@@ -1582,6 +1582,13 @@ static struct ctl_table vs_vars[] = {
.proc_handler   = proc_do_sync_threshold,
},
{
+   .procname   = "sync_interval",
+   .data   = &sysctl_ip_vs_sync_interval,
+   .maxlen = sizeof(int),
+   .mode   = 0644,
+   .proc_handler   = proc_dointvec,
+   },
+   {
		.procname   = "nat_icmp_send",
		.data   = &sysctl_ip_vs_nat_icmp_send,
		.maxlen = sizeof(int),
diff --git a/net/ipv4/ipvs/ip_vs_sync.c b/net/ipv4/ipvs/ip_vs_sync.c
index 948378d..9b57ad3 100644
--- a/net/ipv4/ipvs/ip_vs_sync.c
+++ b/net/ipv4/ipvs/ip_vs_sync.c
@@ -143,6 +143,8 @@ char ip_vs_backup_mcast_ifn[IP_VS_IFNAME_MAXLEN];
 /* multicast addr */
 static struct sockaddr_in mcast_addr;
 
+/* milliseconds between synchronization runs */
+int sysctl_ip_vs_sync_interval = 1000;
 
 static inline void sb_queue_tail(struct ip_vs_sync_buff *sb)
 {
@@ -701,7 +703,7 @@ static void sync_master_loop(void)
if (stop_master_sync)
break;
 
-   msleep_interruptible(1000);
+   msleep_interruptible(sysctl_ip_vs_sync_interval);
}
 
/* clean up the sync_buff queue */
@@ -758,7 +760,7 @@ static void sync_backup_loop(void)
if (stop_backup_sync)
break;
 
-   msleep_interruptible(1000);
+   msleep_interruptible(sysctl_ip_vs_sync_interval);
}
 
/* release the sending multicast socket */
-- 
1.5.4

--
To unsubscribe from this list: send the line unsubscribe netdev in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


Re: [PATCH] ipvs: Make the synchronization interval controllable

2008-02-06 Thread Sven Wegener

On Wed, 6 Feb 2008, David Rientjes wrote:


On Wed, 6 Feb 2008, Sven Wegener wrote:


diff --git a/net/ipv4/ipvs/ip_vs_sync.c b/net/ipv4/ipvs/ip_vs_sync.c
index 948378d..9b57ad3 100644
--- a/net/ipv4/ipvs/ip_vs_sync.c
+++ b/net/ipv4/ipvs/ip_vs_sync.c
@@ -143,6 +143,8 @@ char ip_vs_backup_mcast_ifn[IP_VS_IFNAME_MAXLEN];
 /* multicast addr */
 static struct sockaddr_in mcast_addr;

+/* milliseconds between synchronization runs */
+int sysctl_ip_vs_sync_interval = 1000;

 static inline void sb_queue_tail(struct ip_vs_sync_buff *sb)
 {


How useful is a negative ip_vs_sync_interval?


Negative values will be converted to MAX_JIFFY_OFFSET by msecs_to_jiffies 
and result in a very long interval. A too long interval will be a good way 
to get your system OOM. We could use an unsigned int or even restrict the 
value with proc_dointvec_minmax. I'd prefer the latter, that's what I 
already had in my mind and it also protects from unintentionally choosing 
a too long interval.


Sven
--
To unsubscribe from this list: send the line unsubscribe netdev in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH] ipvs: Make the synchronization interval controllable

2008-02-06 Thread Sven Wegener
The default synchronization interval of 1000 milliseconds is too high for a
heavily loaded director. Collecting the connection information from one second
and then sending it out in a burst will overflow the socket buffer and lead to
synchronization information being dropped. Make the interval controllable by a
sysctl variable so that users can tune it. We enforce a lower limit of 0 and an
upper limit of 2000 ms on the interval. A too large interval can make the
synchronization buffer consume too much memory and will also delay the exit of
the kernel threads.

Signed-off-by: Sven Wegener [EMAIL PROTECTED]
---

Changes from the last version include the addition of the range enforcement.
Also place the definitions of the variables where all other ipvs sysctl
variables are.

Documentation/networking/ipvs-sysctl.txt |   10 ++
 include/net/ip_vs.h  |1 +
 net/ipv4/ipvs/ip_vs_ctl.c|   12 
 net/ipv4/ipvs/ip_vs_sync.c   |4 ++--
 4 files changed, 25 insertions(+), 2 deletions(-)

diff --git a/Documentation/networking/ipvs-sysctl.txt 
b/Documentation/networking/ipvs-sysctl.txt
index 4ccdbca..bb4eb9a 100644
--- a/Documentation/networking/ipvs-sysctl.txt
+++ b/Documentation/networking/ipvs-sysctl.txt
@@ -141,3 +141,13 @@ sync_threshold - INTEGER
 synchronized, every time the number of its incoming packets
 modulus 50 equals the threshold. The range of the threshold is
 from 0 to 49.
+
+sync_interval - INTEGER
+   default 1000
+
+   The information from synchronization is buffered and sent out at a
+   regular interval by a kernel thread. The interval (in ms) is
+   controlled by this value. The default is too high for a heavily loaded
+   director. If you get a lot of ip_vs_send_async error messages from
+   your kernel, then you should lower this value. The value of the
+   interval can be chosen from the range from 0 to 2000.
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 56f3c94..9c4498b 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -854,6 +854,7 @@ extern int sysctl_ip_vs_cache_bypass;
 extern int sysctl_ip_vs_expire_nodest_conn;
 extern int sysctl_ip_vs_expire_quiescent_template;
 extern int sysctl_ip_vs_sync_threshold[2];
+extern int sysctl_ip_vs_sync_interval;
 extern int sysctl_ip_vs_nat_icmp_send;
 extern struct ip_vs_stats ip_vs_stats;
 extern struct ctl_path net_vs_ctl_path[];
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c
index 94c5767..c6322f7 100644
--- a/net/ipv4/ipvs/ip_vs_ctl.c
+++ b/net/ipv4/ipvs/ip_vs_ctl.c
@@ -80,8 +80,11 @@ int sysctl_ip_vs_cache_bypass = 0;
 int sysctl_ip_vs_expire_nodest_conn = 0;
 int sysctl_ip_vs_expire_quiescent_template = 0;
 int sysctl_ip_vs_sync_threshold[2] = { 3, 50 };
+int sysctl_ip_vs_sync_interval = 1000;
 int sysctl_ip_vs_nat_icmp_send = 0;
 
+static int ip_vs_sync_interval_min = 0;
+static int ip_vs_sync_interval_max = 2000;
 
 #ifdef CONFIG_IP_VS_DEBUG
 static int sysctl_ip_vs_debug_level = 0;
@@ -1582,6 +1585,15 @@ static struct ctl_table vs_vars[] = {
.proc_handler   = proc_do_sync_threshold,
},
{
+   .procname   = "sync_interval",
+   .data   = &sysctl_ip_vs_sync_interval,
+   .maxlen = sizeof(int),
+   .mode   = 0644,
+   .proc_handler   = proc_dointvec_minmax,
+   .extra1 = &ip_vs_sync_interval_min,
+   .extra2 = &ip_vs_sync_interval_max,
+   },
+   {
		.procname   = "nat_icmp_send",
		.data   = &sysctl_ip_vs_nat_icmp_send,
		.maxlen = sizeof(int),
diff --git a/net/ipv4/ipvs/ip_vs_sync.c b/net/ipv4/ipvs/ip_vs_sync.c
index 948378d..10ab1b7 100644
--- a/net/ipv4/ipvs/ip_vs_sync.c
+++ b/net/ipv4/ipvs/ip_vs_sync.c
@@ -701,7 +701,7 @@ static void sync_master_loop(void)
if (stop_master_sync)
break;
 
-   msleep_interruptible(1000);
+   msleep_interruptible(sysctl_ip_vs_sync_interval);
}
 
/* clean up the sync_buff queue */
@@ -758,7 +758,7 @@ static void sync_backup_loop(void)
if (stop_backup_sync)
break;
 
-   msleep_interruptible(1000);
+   msleep_interruptible(sysctl_ip_vs_sync_interval);
}
 
/* release the sending multicast socket */
-- 
1.5.4

--
To unsubscribe from this list: send the line unsubscribe netdev in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html


[PATCH] ipvs: Make wrr no available servers error message rate-limited

2008-02-05 Thread Sven Wegener
No available servers is more an error message than something informational. It
should also be rate-limited, else we're going to flood our logs on a busy
director, if all real servers are out of order with a weight of zero.

Signed-off-by: Sven Wegener [EMAIL PROTECTED]
---

Actually, do we need this message at all? The wrr scheduler is the only one
printing an error message in such a case.

 net/ipv4/ipvs/ip_vs_wrr.c |3 ++-
 1 files changed, 2 insertions(+), 1 deletions(-)

diff --git a/net/ipv4/ipvs/ip_vs_wrr.c b/net/ipv4/ipvs/ip_vs_wrr.c
index 749fa04..85c680a 100644
--- a/net/ipv4/ipvs/ip_vs_wrr.c
+++ b/net/ipv4/ipvs/ip_vs_wrr.c
@@ -22,6 +22,7 @@
 
 #include <linux/module.h>
 #include <linux/kernel.h>
+#include <linux/net.h>

 #include <net/ip_vs.h>
 
@@ -169,7 +170,7 @@ ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct 
sk_buff *skb)
 */
	if (mark->cw == 0) {
		mark->cl = &svc->destinations;
-		IP_VS_INFO("ip_vs_wrr_schedule(): "
+		IP_VS_ERR_RL("ip_vs_wrr_schedule(): "
			   "no available servers\n");
dest = NULL;
goto out;
-- 
1.5.3.7

--
To unsubscribe from this list: send the line unsubscribe netdev in
the body of a message to [EMAIL PROTECTED]
More majordomo info at  http://vger.kernel.org/majordomo-info.html