Re: [PATCH v4 net-next 2/4] net: convert low latency sockets to sched_clock()
On 14/06/2013 06:12, Eric Dumazet wrote:
> On Fri, 2013-06-14 at 04:57 +0300, Eliezer Tamir wrote:
>> Use sched_clock() instead of get_cycles().
>> We can use sched_clock() because we don't care much about accuracy.
>> Remove the dependency on X86_TSC
>>
>> Signed-off-by: Eliezer Tamir
>> ---
>>
>> -static inline bool can_poll_ll(cycles_t end_time)
>> +static inline bool can_poll_ll(u64 end_time)
>>  {
>> -	return !time_after((unsigned long)get_cycles(),
>> +	return !time_after((unsigned long)sched_clock(),
>>  			   (unsigned long)end_time);
>>  }
>
> I do not really understand why you bother to have 64bit wide values,
> and then use these "unsigned long" casts here.
>
> On 32bit arches, this will really limit to 2^31 ns range.
>
> You should instead either :
>
> - use time_after_64()
>
> or
>
> - explicitly limit sysctl_net_ll_poll range

OK
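To make the "2^31 ns range" point above concrete: on a 32bit arch, unsigned long is 32 bits, so the casts compare truncated values and the comparison inverts once the real difference exceeds ~2.147 s. A minimal userspace model for illustration only (the macro below mimics how the kernel's time_after() behaves with a 32-bit long; the clock readings are made-up values, not real sched_clock() output):

#include <stdio.h>
#include <stdint.h>

/* simplified model of the kernel's time_after() as it behaves
 * when unsigned long is 32 bits wide */
#define time_after(a, b) ((int32_t)((b) - (a)) < 0)

int main(void)
{
	uint64_t now = 10000000000ULL;      /* pretend sched_clock(): 10 s */
	uint64_t end = now + 3000000000ULL; /* poll budget of 3 s */

	/* the real difference (3 s) exceeds 2^31 ns (~2.147 s), so after
	 * truncation "now" wrongly compares as being past "end" */
	printf("%d\n", time_after((uint32_t)now, (uint32_t)end)); /* prints 1 */
	return 0;
}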
Re: [PATCH v4 net-next 2/4] net: convert low latency sockets to sched_clock()
On Fri, 2013-06-14 at 04:57 +0300, Eliezer Tamir wrote:
> Use sched_clock() instead of get_cycles().
> We can use sched_clock() because we don't care much about accuracy.
> Remove the dependency on X86_TSC
>
> Signed-off-by: Eliezer Tamir
> ---
>
> -static inline bool can_poll_ll(cycles_t end_time)
> +static inline bool can_poll_ll(u64 end_time)
>  {
> -	return !time_after((unsigned long)get_cycles(),
> +	return !time_after((unsigned long)sched_clock(),
>  			   (unsigned long)end_time);
>  }

I do not really understand why you bother to have 64bit wide values,
and then use these "unsigned long" casts here.

On 32bit arches, this will really limit to 2^31 ns range.

You should instead either :

- use time_after_64()

or

- explicitly limit sysctl_net_ll_poll range
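For illustration, a minimal sketch of what the first suggestion could look like (the macro is spelled time_after64() in <linux/jiffies.h>; it compares full u64 values, so nothing is truncated on 32bit arches):

static inline bool can_poll_ll(u64 end_time)
{
	/* compare the full 64-bit values; no truncating casts needed */
	return !time_after64(sched_clock(), end_time);
}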
[PATCH v4 net-next 2/4] net: convert low latency sockets to sched_clock()
Use sched_clock() instead of get_cycles().
We can use sched_clock() because we don't care much about accuracy.
Remove the dependency on X86_TSC

Signed-off-by: Eliezer Tamir
---
 include/net/ll_poll.h |   32 +---
 net/Kconfig           |    1 -
 2 files changed, 17 insertions(+), 16 deletions(-)

diff --git a/include/net/ll_poll.h b/include/net/ll_poll.h
index 44e2f70..8608547 100644
--- a/include/net/ll_poll.h
+++ b/include/net/ll_poll.h
@@ -21,10 +21,6 @@
  * e1000-devel Mailing List
  */

-/*
- * For now this depends on CONFIG_X86_TSC
- */
-
 #ifndef _LINUX_NET_LL_POLL_H
 #define _LINUX_NET_LL_POLL_H

@@ -40,13 +36,19 @@ extern unsigned int sysctl_net_ll_poll __read_mostly;
 #define LL_FLUSH_FAILED	-1
 #define LL_FLUSH_BUSY	-2

-/* we don't mind a ~2.5% imprecision */
-#define TSC_MHZ (tsc_khz >> 10)
-
-static inline cycles_t ll_end_time(void)
+/* we can use sched_clock() because we don't care much about precision
+ * we only care that the average is bounded
+ */
+static inline u64 ll_end_time(void)
 {
-	return (cycles_t)TSC_MHZ * ACCESS_ONCE(sysctl_net_ll_poll)
-			+ get_cycles();
+	u64 end_time = ACCESS_ONCE(sysctl_net_ll_poll);
+
+	/* we don't mind a ~2.5% imprecision
+	 * sysctl_net_ll_poll is a u_int so this can't overflow
+	 */
+	end_time = (end_time << 10) + sched_clock();
+
+	return end_time;
 }

 static inline bool sk_valid_ll(struct sock *sk)
@@ -55,16 +57,16 @@ static inline bool sk_valid_ll(struct sock *sk)
 	       !need_resched() && !signal_pending(current);
 }

-static inline bool can_poll_ll(cycles_t end_time)
+static inline bool can_poll_ll(u64 end_time)
 {
-	return !time_after((unsigned long)get_cycles(),
+	return !time_after((unsigned long)sched_clock(),
 			   (unsigned long)end_time);
 }

 static inline bool sk_poll_ll(struct sock *sk, int nonblock)
 {
-	cycles_t end_time = ll_end_time();
 	const struct net_device_ops *ops;
+	u64 end_time = ll_end_time();
 	struct napi_struct *napi;
 	int rc = false;

@@ -117,7 +119,7 @@ static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb)

 #else /* CONFIG_NET_LL_RX_POLL */

-static inline cycles_t ll_end_time(void)
+static inline u64 ll_end_time(void)
 {
 	return 0;
 }

@@ -140,7 +142,7 @@ static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb)
 {
 }

-static inline bool can_poll_ll(cycles_t end_time)
+static inline bool can_poll_ll(u64 end_time)
 {
 	return false;
 }

diff --git a/net/Kconfig b/net/Kconfig
index d6a9ce6..e591668 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -245,7 +245,6 @@ config NETPRIO_CGROUP

 config NET_LL_RX_POLL
 	bool "Low Latency Receive Poll"
-	depends on X86_TSC
 	default n
 	---help---
 	  Support Low Latency Receive Queue Poll.
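For reference, the arithmetic behind the "~2.5% imprecision" comment: the old code divided by 1024 instead of 1000 (tsc_khz >> 10), and the new code multiplies by 1024 instead of 1000 (end_time << 10), so the nanosecond budget overshoots the requested value by about 2.4%. A standalone check, assuming sysctl_net_ll_poll is expressed in microseconds as the replaced TSC_MHZ conversion implies:

#include <stdio.h>

int main(void)
{
	unsigned int us = 50;                     /* hypothetical sysctl value */
	unsigned long long exact = us * 1000ULL;  /* exact ns: 50000 */
	unsigned long long approx = (unsigned long long)us << 10; /* 51200 */

	/* (51200 - 50000) / 50000 = 2.4% overshoot */
	printf("exact %llu ns, approx %llu ns\n", exact, approx);
	return 0;
}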