The branch main has been updated by tuexen:

URL: https://cgit.FreeBSD.org/src/commit/?id=f197a24d11967b5780717d05e84a749f34b773c4
commit f197a24d11967b5780717d05e84a749f34b773c4
Author:     Nick Banks <nickba...@netflix.com>
AuthorDate: 2025-07-31 13:03:47 +0000
Commit:     Michael Tuexen <tue...@freebsd.org>
CommitDate: 2025-07-31 13:03:47 +0000

    tcp: improve variable and constant names

    Don't use ticks in variable names or constant when they don't have a
    relation to ticks. Use slots or usecs.

    No functional change intended.

    Reviewed by:    tuexen
    MFC after:      1 week
    Sponsored by:   Netflix, Inc.
---
 sys/netinet/tcp_hpts.c        | 32 ++++++++++++++++----------------
 sys/netinet/tcp_hpts.h        | 20 ++++++++++----------
 sys/netinet/tcp_stacks/bbr.c  |  6 +++---
 sys/netinet/tcp_stacks/rack.c | 26 +++++++++++++-------------
 4 files changed, 42 insertions(+), 42 deletions(-)

diff --git a/sys/netinet/tcp_hpts.c b/sys/netinet/tcp_hpts.c
index 40d8ca15ed83..8cbe1e81a06a 100644
--- a/sys/netinet/tcp_hpts.c
+++ b/sys/netinet/tcp_hpts.c
@@ -366,7 +366,7 @@ sysctl_net_inet_tcp_hpts_max_sleep(SYSCTL_HANDLER_ARGS)
 	new = hpts_sleep_max;
 	error = sysctl_handle_int(oidp, &new, 0, req);
 	if (error == 0 && req->newptr) {
-		if ((new < (dynamic_min_sleep/HPTS_TICKS_PER_SLOT)) ||
+		if ((new < (dynamic_min_sleep/HPTS_USECS_PER_SLOT)) ||
 		    (new > HPTS_MAX_SLEEP_ALLOWED))
 			error = EINVAL;
 		else
@@ -404,15 +404,15 @@ SYSCTL_PROC(_net_inet_tcp_hpts, OID_AUTO, minsleep,
     &sysctl_net_inet_tcp_hpts_min_sleep, "IU",
     "The minimum time the hpts must sleep before processing more slots");

-static int ticks_indicate_more_sleep = TICKS_INDICATE_MORE_SLEEP;
-static int ticks_indicate_less_sleep = TICKS_INDICATE_LESS_SLEEP;
+static int slots_indicate_more_sleep = SLOTS_INDICATE_MORE_SLEEP;
+static int slots_indicate_less_sleep = SLOTS_INDICATE_LESS_SLEEP;
 static int tcp_hpts_no_wake_over_thresh = 1;

 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, more_sleep, CTLFLAG_RW,
-    &ticks_indicate_more_sleep, 0,
+    &slots_indicate_more_sleep, 0,
     "If we only process this many or less on a timeout, we need longer sleep on the next callout");
 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, less_sleep, CTLFLAG_RW,
-    &ticks_indicate_less_sleep, 0,
+    &slots_indicate_less_sleep, 0,
     "If we process this many or more on a timeout, we need less sleep on the next callout");
 SYSCTL_INT(_net_inet_tcp_hpts, OID_AUTO, nowake_over_thresh, CTLFLAG_RW,
     &tcp_hpts_no_wake_over_thresh, 0,
@@ -877,7 +877,7 @@ tcp_hpts_insert_diag(struct tcpcb *tp, uint32_t slot, int32_t line, struct hpts_
 		return (slot_on);
 	}
 	/* Get the current time relative to the wheel */
-	wheel_cts = tcp_tv_to_hptstick(&tv);
+	wheel_cts = tcp_tv_to_hpts_slot(&tv);
 	/* Map it onto the wheel */
 	wheel_slot = tick_to_wheel(wheel_cts);
 	/* Now what's the max we can place it at? */
@@ -949,7 +949,7 @@ tcp_hpts_insert_diag(struct tcpcb *tp, uint32_t slot, int32_t line, struct hpts_
 			 * We need to reschedule the hpts's time-out.
 			 */
 			hpts->p_hpts_sleep_time = slot;
-			need_new_to = slot * HPTS_TICKS_PER_SLOT;
+			need_new_to = slot * HPTS_USECS_PER_SLOT;
 		}
 	}
 	/*
@@ -1121,7 +1121,7 @@ again:
 	HPTS_MTX_ASSERT(hpts);
 	slots_to_run = hpts_slots_diff(hpts->p_prev_slot, hpts->p_cur_slot);
 	if (((hpts->p_curtick - hpts->p_lasttick) >
-	     ((NUM_OF_HPTSI_SLOTS-1) * HPTS_TICKS_PER_SLOT)) &&
+	     ((NUM_OF_HPTSI_SLOTS-1) * HPTS_USECS_PER_SLOT)) &&
 	     (hpts->p_on_queue_cnt != 0)) {
 		/*
 		 * Wheel wrap is occuring, basically we
@@ -1202,7 +1202,7 @@ again:
 		 * was not any (i.e. if slots_to_run == 1, no delay).
 		 */
 		hpts->p_delayed_by = (slots_to_run - (i + 1)) *
-		    HPTS_TICKS_PER_SLOT;
+		    HPTS_USECS_PER_SLOT;

 		runningslot = hpts->p_runningslot;
 		hptsh = &hpts->p_hptss[runningslot];
@@ -1569,7 +1569,7 @@ __tcp_run_hpts(void)
 	ticks_ran = tcp_hptsi(hpts, false);
 	/* We may want to adjust the sleep values here */
 	if (hpts->p_on_queue_cnt >= conn_cnt_thresh) {
-		if (ticks_ran > ticks_indicate_less_sleep) {
+		if (ticks_ran > slots_indicate_less_sleep) {
 			struct timeval tv;
 			sbintime_t sb;

@@ -1579,7 +1579,7 @@ __tcp_run_hpts(void)
 			/* Reschedule with new to value */
 			tcp_hpts_set_max_sleep(hpts, 0);
 			tv.tv_sec = 0;
-			tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_TICKS_PER_SLOT;
+			tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_USECS_PER_SLOT;
 			/* Validate its in the right ranges */
 			if (tv.tv_usec < hpts->p_mysleep.tv_usec) {
 				hpts->overidden_sleep = tv.tv_usec;
@@ -1601,7 +1601,7 @@ __tcp_run_hpts(void)
 			callout_reset_sbt_on(&hpts->co, sb, 0,
 			    hpts_timeout_swi, hpts, hpts->p_cpu,
 			    (C_DIRECT_EXEC | C_PREL(tcp_hpts_precision)));
-		} else if (ticks_ran < ticks_indicate_more_sleep) {
+		} else if (ticks_ran < slots_indicate_more_sleep) {
 			/* For the further sleep, don't reschedule hpts */
 			hpts->p_mysleep.tv_usec *= 2;
 			if (hpts->p_mysleep.tv_usec > dynamic_max_sleep)
@@ -1683,7 +1683,7 @@ tcp_hpts_thread(void *ctx)
 	hpts->p_hpts_active = 1;
 	ticks_ran = tcp_hptsi(hpts, true);
 	tv.tv_sec = 0;
-	tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_TICKS_PER_SLOT;
+	tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_USECS_PER_SLOT;
 	if ((hpts->p_on_queue_cnt > conn_cnt_thresh) &&
 	    (hpts->hit_callout_thresh == 0)) {
 		hpts->hit_callout_thresh = 1;
 		atomic_add_int(&hpts_that_need_softclock, 1);
@@ -1697,11 +1697,11 @@ tcp_hpts_thread(void *ctx)
 			 * Only adjust sleep time if we were
 			 * called from the callout i.e. direct_wake == 0.
 			 */
-			if (ticks_ran < ticks_indicate_more_sleep) {
+			if (ticks_ran < slots_indicate_more_sleep) {
 				hpts->p_mysleep.tv_usec *= 2;
 				if (hpts->p_mysleep.tv_usec > dynamic_max_sleep)
 					hpts->p_mysleep.tv_usec = dynamic_max_sleep;
-			} else if (ticks_ran > ticks_indicate_less_sleep) {
+			} else if (ticks_ran > slots_indicate_less_sleep) {
 				hpts->p_mysleep.tv_usec /= 2;
 				if (hpts->p_mysleep.tv_usec < dynamic_min_sleep)
 					hpts->p_mysleep.tv_usec = dynamic_min_sleep;
@@ -1995,7 +1995,7 @@ tcp_hpts_mod_load(void)
 			}
 		}
 		tv.tv_sec = 0;
-		tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_TICKS_PER_SLOT;
+		tv.tv_usec = hpts->p_hpts_sleep_time * HPTS_USECS_PER_SLOT;
 		hpts->sleeping = tv.tv_usec;
 		sb = tvtosbt(tv);
 		callout_reset_sbt_on(&hpts->co, sb, 0,
diff --git a/sys/netinet/tcp_hpts.h b/sys/netinet/tcp_hpts.h
index d9c01d6d2ac4..61f5160ffad6 100644
--- a/sys/netinet/tcp_hpts.h
+++ b/sys/netinet/tcp_hpts.h
@@ -26,8 +26,8 @@
 #ifndef __tcp_hpts_h__
 #define __tcp_hpts_h__

-/* Number of useconds in a hpts tick */
-#define HPTS_TICKS_PER_SLOT 10
+/* Number of useconds represented by an hpts slot */
+#define HPTS_USECS_PER_SLOT 10
 #define HPTS_MS_TO_SLOTS(x) ((x * 100) + 1)
 #define HPTS_USEC_TO_SLOTS(x) ((x+9) /10)
 #define HPTS_USEC_IN_SEC 1000000
@@ -91,8 +91,8 @@ struct hpts_diag {
 #define DYNAMIC_MAX_SLEEP 5000	/* 5ms */

 /* Thresholds for raising/lowering sleep */
-#define TICKS_INDICATE_MORE_SLEEP 100		/* This would be 1ms */
-#define TICKS_INDICATE_LESS_SLEEP 1000		/* This would indicate 10ms */
+#define SLOTS_INDICATE_MORE_SLEEP 100		/* This would be 1ms */
+#define SLOTS_INDICATE_LESS_SLEEP 1000		/* This would indicate 10ms */
 /**
  *
  * Dynamic adjustment of sleeping times is done in "new" mode
@@ -102,10 +102,10 @@ struct hpts_diag {
  * When we are in the "new" mode i.e. conn_cnt > conn_cnt_thresh
  * then we do a dynamic adjustment on the time we sleep.
  * Our threshold is if the lateness of the first client served (in ticks) is
- * greater than or equal too ticks_indicate_more_sleep (10ms
+ * greater than or equal too slots_indicate_more_sleep (10ms
  * or 10000 ticks). If we were that late, the actual sleep time
  * is adjusted down by 50%. If the ticks_ran is less than
- * ticks_indicate_more_sleep (100 ticks or 1000usecs).
+ * slots_indicate_more_sleep (100 ticks or 1000usecs).
  *
  */

@@ -165,9 +165,9 @@ extern int32_t tcp_min_hptsi_time;
  * to userspace as well.
  */
 static inline uint32_t
-tcp_tv_to_hptstick(const struct timeval *sv)
+tcp_tv_to_hpts_slot(const struct timeval *sv)
 {
-	return ((sv->tv_sec * 100000) + (sv->tv_usec / HPTS_TICKS_PER_SLOT));
+	return ((sv->tv_sec * 100000) + (sv->tv_usec / HPTS_USECS_PER_SLOT));
 }

 static inline uint32_t
@@ -195,7 +195,7 @@ extern int32_t tcp_min_hptsi_time;
 static inline int32_t
 get_hpts_min_sleep_time(void)
 {
-	return (tcp_min_hptsi_time + HPTS_TICKS_PER_SLOT);
+	return (tcp_min_hptsi_time + HPTS_USECS_PER_SLOT);
 }

 static inline uint32_t
@@ -206,7 +206,7 @@ tcp_gethptstick(struct timeval *sv)
 	if (sv == NULL)
 		sv = &tv;
 	microuptime(sv);
-	return (tcp_tv_to_hptstick(sv));
+	return (tcp_tv_to_hpts_slot(sv));
 }

 static inline uint64_t
diff --git a/sys/netinet/tcp_stacks/bbr.c b/sys/netinet/tcp_stacks/bbr.c
index 0a5888dae9db..ce4e9f30020c 100644
--- a/sys/netinet/tcp_stacks/bbr.c
+++ b/sys/netinet/tcp_stacks/bbr.c
@@ -14123,17 +14123,17 @@ bbr_switch_failed(struct tcpcb *tp)
 			toval = bbr->rc_pacer_started - cts;
 		} else {
 			/* one slot please */
-			toval = HPTS_TICKS_PER_SLOT;
+			toval = HPTS_USECS_PER_SLOT;
 		}
 	} else if (bbr->r_ctl.rc_hpts_flags & PACE_TMR_MASK) {
 		if (TSTMP_GT(bbr->r_ctl.rc_timer_exp, cts)) {
 			toval = bbr->r_ctl.rc_timer_exp - cts;
 		} else {
 			/* one slot please */
-			toval = HPTS_TICKS_PER_SLOT;
+			toval = HPTS_USECS_PER_SLOT;
 		}
 	} else
-		toval = HPTS_TICKS_PER_SLOT;
+		toval = HPTS_USECS_PER_SLOT;
 	(void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(toval),
 	    __LINE__, &diag);
 	bbr_log_hpts_diag(bbr, cts, &diag);
diff --git a/sys/netinet/tcp_stacks/rack.c b/sys/netinet/tcp_stacks/rack.c
index a411162d275d..d6bbfeb886d9 100644
--- a/sys/netinet/tcp_stacks/rack.c
+++ b/sys/netinet/tcp_stacks/rack.c
@@ -6592,22 +6592,22 @@ rack_start_hpts_timer (struct tcp_rack *rack, struct tcpcb *tp, uint32_t cts,
 			 * on the clock. We always have a min
 			 * 10 slots (10 x 10 i.e. 100 usecs).
 			 */
-			if (slot <= HPTS_TICKS_PER_SLOT) {
+			if (slot <= HPTS_USECS_PER_SLOT) {
 				/* We gain delay */
-				rack->r_ctl.rc_agg_delayed += (HPTS_TICKS_PER_SLOT - slot);
-				slot = HPTS_TICKS_PER_SLOT;
+				rack->r_ctl.rc_agg_delayed += (HPTS_USECS_PER_SLOT - slot);
+				slot = HPTS_USECS_PER_SLOT;
 			} else {
 				/* We take off some */
-				rack->r_ctl.rc_agg_delayed -= (slot - HPTS_TICKS_PER_SLOT);
-				slot = HPTS_TICKS_PER_SLOT;
+				rack->r_ctl.rc_agg_delayed -= (slot - HPTS_USECS_PER_SLOT);
+				slot = HPTS_USECS_PER_SLOT;
 			}
 		} else {
 			slot -= rack->r_ctl.rc_agg_delayed;
 			rack->r_ctl.rc_agg_delayed = 0;
 			/* Make sure we have 100 useconds at minimum */
-			if (slot < HPTS_TICKS_PER_SLOT) {
-				rack->r_ctl.rc_agg_delayed = HPTS_TICKS_PER_SLOT - slot;
-				slot = HPTS_TICKS_PER_SLOT;
+			if (slot < HPTS_USECS_PER_SLOT) {
+				rack->r_ctl.rc_agg_delayed = HPTS_USECS_PER_SLOT - slot;
+				slot = HPTS_USECS_PER_SLOT;
 			}
 			if (rack->r_ctl.rc_agg_delayed == 0)
 				rack->r_late = 0;
@@ -14368,17 +14368,17 @@ rack_switch_failed(struct tcpcb *tp)
 			toval = rack->r_ctl.rc_last_output_to - cts;
 		} else {
 			/* one slot please */
-			toval = HPTS_TICKS_PER_SLOT;
+			toval = HPTS_USECS_PER_SLOT;
 		}
 	} else if (rack->r_ctl.rc_hpts_flags & PACE_TMR_MASK) {
 		if (TSTMP_GT(rack->r_ctl.rc_timer_exp, cts)) {
 			toval = rack->r_ctl.rc_timer_exp - cts;
 		} else {
 			/* one slot please */
-			toval = HPTS_TICKS_PER_SLOT;
+			toval = HPTS_USECS_PER_SLOT;
 		}
 	} else
-		toval = HPTS_TICKS_PER_SLOT;
+		toval = HPTS_USECS_PER_SLOT;
 	(void)tcp_hpts_insert_diag(tp, HPTS_USEC_TO_SLOTS(toval),
 	    __LINE__, &diag);
 	rack_log_hpts_diag(rack, cts, &diag, &tv);
@@ -14957,7 +14957,7 @@ rack_init(struct tcpcb *tp, void **ptr)
 			if (TSTMP_GT(qr.timer_pacing_to, us_cts))
 				tov = qr.timer_pacing_to - us_cts;
 			else
-				tov = HPTS_TICKS_PER_SLOT;
+				tov = HPTS_USECS_PER_SLOT;
 		}
 		if (qr.timer_hpts_flags & PACE_TMR_MASK) {
 			rack->r_ctl.rc_timer_exp = qr.timer_timer_exp;
@@ -14965,7 +14965,7 @@ rack_init(struct tcpcb *tp, void **ptr)
 			if (TSTMP_GT(qr.timer_timer_exp, us_cts))
 				tov = qr.timer_timer_exp - us_cts;
 			else
-				tov = HPTS_TICKS_PER_SLOT;
+				tov = HPTS_USECS_PER_SLOT;
 		}
 	}
 	rack_log_chg_info(tp, rack, 4,
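
For readers who don't have the hpts code in front of them, the following standalone sketch (plain userland C) illustrates the unit relationships that the renamed macros express: one wheel slot represents 10 usec, HPTS_USEC_TO_SLOTS() rounds microseconds up to whole slots, and tcp_tv_to_hpts_slot() maps a struct timeval onto an absolute slot count. The macro bodies are copied from the diff above; the helper name tv_to_hpts_slot() and the demo main() are illustrative only and not part of the commit.

/*
 * Standalone sketch (not the kernel code) of the slot/usec relationships
 * behind the renamed hpts macros.  Macro bodies are copied from
 * sys/netinet/tcp_hpts.h as changed by this commit; tv_to_hpts_slot()
 * and main() are hypothetical demo code.
 */
#include <stdio.h>
#include <stdint.h>
#include <sys/time.h>

#define HPTS_USECS_PER_SLOT	10		/* one wheel slot represents 10 usec */
#define HPTS_MS_TO_SLOTS(x)	((x * 100) + 1)	/* x ms -> x * 100 slots, plus one spare slot */
#define HPTS_USEC_TO_SLOTS(x)	((x+9) /10)	/* round usecs up to whole slots */

/* Same conversion as tcp_tv_to_hpts_slot(): 100000 slots per second. */
static uint32_t
tv_to_hpts_slot(const struct timeval *sv)
{
	return ((sv->tv_sec * 100000) + (sv->tv_usec / HPTS_USECS_PER_SLOT));
}

int
main(void)
{
	struct timeval tv = { .tv_sec = 1, .tv_usec = 250 };

	/* 1.000250 s maps to slot 100000 + 25 = 100025 */
	printf("absolute slot: %u\n", (unsigned)tv_to_hpts_slot(&tv));
	/* 5 ms of pacing delay -> 501 slots */
	printf("5 ms  -> %d slots\n", HPTS_MS_TO_SLOTS(5));
	/* 42 usec -> 5 slots (rounded up) */
	printf("42 us -> %d slots\n", HPTS_USEC_TO_SLOTS(42));
	return (0);
}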