On 01/18/2017 01:34 AM, Daniele Di Proietto wrote:
> 2017-01-17 11:43 GMT-08:00 Kevin Traynor <[email protected]>:
>> On 01/17/2017 05:43 PM, Ciara Loftus wrote:
>>> Instead of counting all polling cycles as processing cycles, only count
>>> the cycles where packets were received from the polling.
>>
>> This makes these stats much clearer. One minor comment below; other
>> than that,
>>
>> Acked-by: Kevin Traynor <[email protected]>
>>
>>>
>>> Signed-off-by: Georg Schmuecking <[email protected]>
>>> Signed-off-by: Ciara Loftus <[email protected]>
>>> Co-authored-by: Ciara Loftus <[email protected]>
>
> Minor: the Co-authored-by tag should name someone different from the main author.
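>
> E.g. since Ciara appears to be the main author of this patch, it should
> presumably be something like:
>
>     Co-authored-by: Georg Schmuecking <[email protected]>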
>
> This makes it easier to understand how busy a pmd thread is, which is a
> valid question that a sysadmin might have.
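>
> E.g. with this patch "ovs-appctl dpif-netdev/pmd-stats-show" would
> report something like (numbers made up):
>
>     idle cycles:900000000 (90.00%)
>     processing cycles:100000000 (10.00%)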
>
> The counters were originally introduced to help developers understand
> how cycles are spent between the drivers (netdev rx) and datapath
> processing (dpif). Do you think it's OK to lose this type of
> information? Perhaps it is, since a developer can also use a profiler,
> but I'm not sure.
>
> Maybe we could keep 'last_cycles' as it is and introduce a separate
> counter to get the idle/busy ratio. I'm not 100% sure this is the best
> way.
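>
> E.g. a sketch of the enum in that case (the names and comments are just
> placeholders):
>
>     enum pmd_cycles_counter_type {
>         PMD_CYCLES_POLLING,     /* Cycles spent polling NICs. */
>         PMD_CYCLES_PROCESSING,  /* Cycles spent processing packets. */
>         PMD_CYCLES_IDLE,        /* Cycles spent on polls that found
>                                  * no packets. */
>         PMD_N_CYCLES
>     };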
>
> What do you guys think?
>
I've only ever used the current stats to estimate whether polling was
getting packets or not, so the addition of an idle stat helps with that.
I like your suggestion of having all three stats, so then it would be
something like:

  polling unsuccessful (idle)
  polling successful (got pkts)
  processing pkts

That would keep the info for a developer, and it could help initial
debug if pkt rates drop on a pmd.
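
Something like this in dp_netdev_process_rxq_port(), going by the
counting that this patch removes (sketch only, the PMD_CYCLES_POLL_*
names are invented):

    cycles_count_start(pmd);
    error = netdev_rxq_recv(rx, &batch);
    cycles_count_end(pmd, error ? PMD_CYCLES_POLL_IDLE
                                : PMD_CYCLES_POLL_BUSY);
    if (!error) {
        cycles_count_start(pmd);
        dp_netdev_input(pmd, &batch, port_no);
        cycles_count_end(pmd, PMD_CYCLES_PROCESSING);
    }
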
Kevin.
> Thanks,
>
> Daniele
>
>>> ---
>>> v2:
>>> - Rebase
>>> ---
>>> lib/dpif-netdev.c | 57 ++++++++++++++++++++++++++++++++++++++++++-------------
>>> 1 file changed, 44 insertions(+), 13 deletions(-)
>>>
>>> diff --git a/lib/dpif-netdev.c b/lib/dpif-netdev.c
>>> index 3901129..3854c79 100644
>>> --- a/lib/dpif-netdev.c
>>> +++ b/lib/dpif-netdev.c
>>> @@ -272,7 +272,10 @@ enum dp_stat_type {
>>>
>>> enum pmd_cycles_counter_type {
>>> PMD_CYCLES_POLLING, /* Cycles spent polling NICs. */
>>
>> This is not used anymore and can be removed.
>>
>>> - PMD_CYCLES_PROCESSING, /* Cycles spent processing packets */
>>> + PMD_CYCLES_IDLE, /* Cycles spent idle or unsuccessful polling */
>>> + PMD_CYCLES_PROCESSING, /* Cycles spent successfully polling and
>>> + * processing polled packets */
>>> +
>>> PMD_N_CYCLES
>>> };
>>>
>>> @@ -747,10 +750,10 @@ pmd_info_show_stats(struct ds *reply,
>>> }
>>>
>>> ds_put_format(reply,
>>> - "\tpolling cycles:%"PRIu64" (%.02f%%)\n"
>>> + "\tidle cycles:%"PRIu64" (%.02f%%)\n"
>>> "\tprocessing cycles:%"PRIu64" (%.02f%%)\n",
>>> - cycles[PMD_CYCLES_POLLING],
>>> - cycles[PMD_CYCLES_POLLING] / (double)total_cycles * 100,
>>> + cycles[PMD_CYCLES_IDLE],
>>> + cycles[PMD_CYCLES_IDLE] / (double)total_cycles * 100,
>>> cycles[PMD_CYCLES_PROCESSING],
>>> cycles[PMD_CYCLES_PROCESSING] / (double)total_cycles * 100);
>>>
>>> @@ -2892,30 +2895,43 @@ cycles_count_end(struct dp_netdev_pmd_thread *pmd,
>>> non_atomic_ullong_add(&pmd->cycles.n[type], interval);
>>> }
>>>
>>> -static void
>>> +/* Calculate the intermediate cycle result and add to the counter 'type' */
>>> +static inline void
>>> +cycles_count_intermediate(struct dp_netdev_pmd_thread *pmd,
>>> + enum pmd_cycles_counter_type type)
>
> I'd add an OVS_REQUIRES(&cycles_counter_fake_mutex)
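>
> i.e. something like (sketch):
>
>     static inline void
>     cycles_count_intermediate(struct dp_netdev_pmd_thread *pmd,
>                               enum pmd_cycles_counter_type type)
>         OVS_REQUIRES(&cycles_counter_fake_mutex)
>         OVS_NO_THREAD_SAFETY_ANALYSIS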
>
>>> + OVS_NO_THREAD_SAFETY_ANALYSIS
>>> +{
>>> + unsigned long long new_cycles = cycles_counter();
>>> + unsigned long long interval = new_cycles - pmd->last_cycles;
>>> + pmd->last_cycles = new_cycles;
>>> +
>>> + non_atomic_ullong_add(&pmd->cycles.n[type], interval);
>>> +}
>>> +
>>> +static int
>>> dp_netdev_process_rxq_port(struct dp_netdev_pmd_thread *pmd,
>>> struct netdev_rxq *rx,
>>> odp_port_t port_no)
>>> {
>>> struct dp_packet_batch batch;
>>> int error;
>>> + int batch_cnt = 0;
>>>
>>> dp_packet_batch_init(&batch);
>>> - cycles_count_start(pmd);
>>> error = netdev_rxq_recv(rx, &batch);
>>> - cycles_count_end(pmd, PMD_CYCLES_POLLING);
>>> if (!error) {
>>> *recirc_depth_get() = 0;
>>>
>>> - cycles_count_start(pmd);
>>> + batch_cnt = batch.count;
>>> dp_netdev_input(pmd, &batch, port_no);
>>> - cycles_count_end(pmd, PMD_CYCLES_PROCESSING);
>>> } else if (error != EAGAIN && error != EOPNOTSUPP) {
>>> static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 5);
>>>
>>> VLOG_ERR_RL(&rl, "error receiving data from %s: %s",
>>> netdev_rxq_get_name(rx), ovs_strerror(error));
>>> }
>>> +
>>> + return batch_cnt;
>>> }
>>>
>>> static struct tx_port *
>>> @@ -3377,21 +3393,27 @@ dpif_netdev_run(struct dpif *dpif)
>>> struct dp_netdev *dp = get_dp_netdev(dpif);
>>> struct dp_netdev_pmd_thread *non_pmd;
>>> uint64_t new_tnl_seq;
>>> + int process_packets = 0;
>>>
>>> ovs_mutex_lock(&dp->port_mutex);
>>> non_pmd = dp_netdev_get_pmd(dp, NON_PMD_CORE_ID);
>>> if (non_pmd) {
>>> ovs_mutex_lock(&dp->non_pmd_mutex);
>>> + cycles_count_start(non_pmd);
>>> HMAP_FOR_EACH (port, node, &dp->ports) {
>>> if (!netdev_is_pmd(port->netdev)) {
>>> int i;
>>>
>>> for (i = 0; i < port->n_rxq; i++) {
>>> - dp_netdev_process_rxq_port(non_pmd, port->rxqs[i].rx,
>>> - port->port_no);
>>> + process_packets +=
>>> + dp_netdev_process_rxq_port(non_pmd,
>>> + port->rxqs[i].rx,
>>> + port->port_no);
>>> }
>>> }
>>> }
>>> + cycles_count_end(non_pmd, process_packets ? PMD_CYCLES_PROCESSING
>>> + : PMD_CYCLES_IDLE);
>>> dpif_netdev_xps_revalidate_pmd(non_pmd, time_msec(), false);
>>> ovs_mutex_unlock(&dp->non_pmd_mutex);
>>>
>>> @@ -3516,6 +3538,7 @@ pmd_thread_main(void *f_)
>>> bool exiting;
>>> int poll_cnt;
>>> int i;
>>> + int process_packets = 0;
>>>
>>> poll_list = NULL;
>>>
>>> @@ -3542,10 +3565,12 @@ reload:
>>> lc = UINT_MAX;
>>> }
>>>
>>> + cycles_count_start(pmd);
>>> for (;;) {
>>> for (i = 0; i < poll_cnt; i++) {
>>> - dp_netdev_process_rxq_port(pmd, poll_list[i].rx,
>>> - poll_list[i].port_no);
>>> + process_packets +=
>>> + dp_netdev_process_rxq_port(pmd, poll_list[i].rx,
>>> + poll_list[i].port_no);
>>> }
>>>
>>> if (lc++ > 1024) {
>>> @@ -3564,8 +3589,14 @@ reload:
>>> break;
>>> }
>>> }
>>> + cycles_count_intermediate(pmd, process_packets ? PMD_CYCLES_PROCESSING
>>> + : PMD_CYCLES_IDLE);
>>> + process_packets = 0;
>>> }
>>>
>>> + cycles_count_end(pmd, process_packets ? PMD_CYCLES_PROCESSING
>>> + : PMD_CYCLES_IDLE);
>>> +
>>> poll_cnt = pmd_load_queues_and_ports(pmd, &poll_list);
>>> exiting = latch_is_set(&pmd->exit_latch);
>>> /* Signal here to make sure the pmd finishes
>>>