Hello Petr Pavlu,
Commit 1f1c2bc9d075 ("ring-buffer: Limit time with disabled
interrupts in rb_check_pages()") from Jul 15, 2024 (linux-next),
leads to the following Smatch static checker warning:
kernel/trace/ring_buffer.c:1540 rb_check_pages()
warn: ignoring unreachable code.
kernel/trace/ring_buffer.c
1501 static void rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
1502 {
1503 struct list_head *head, *tmp;
1504 unsigned long buffer_cnt;
1505 unsigned long flags;
1506 int nr_loops = 0;
1507
1508 /*
1509  * Walk the linked list underpinning the ring buffer and validate all
1510  * its next and prev links.
1511  *
1512  * The check acquires the reader_lock to avoid concurrent processing
1513  * with code that could be modifying the list. However, the lock cannot
1514  * be held for the entire duration of the walk, as this would make the
1515  * time when interrupts are disabled non-deterministic, dependent on the
1516  * ring buffer size. Therefore, the code releases and re-acquires the
1517  * lock after checking each page. The ring_buffer_per_cpu.cnt variable
1518  * is then used to detect if the list was modified while the lock was
1519  * not held, in which case the check needs to be restarted.
1520  *
1521  * The code attempts to perform the check at most three times before
1522  * giving up. This is acceptable because this is only a self-validation
1523  * to detect problems early on. In practice, the list modification
1524  * operations are fairly spaced, and so this check typically succeeds at
1525  * most on the second try.
1526  */
1527 again:
1528 if (++nr_loops > 3)
1529 return;
1530
1531 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1532 head = rb_list_head(cpu_buffer->pages);
1533 if (!rb_check_links(cpu_buffer, head))
1534 goto out_locked;
1535 buffer_cnt = cpu_buffer->cnt;
1536 tmp = head;
1537 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1538 return;
^^^^^^
You probably intended to delete this return?
1539
--> 1540 while (true) {
1541 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1542
1543 if (buffer_cnt != cpu_buffer->cnt) {
1544 /* The list was updated, try again. */
1545 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1546 goto again;
1547 }
1548
1549 tmp = rb_list_head(tmp->next);
1550 if (tmp == head)
1551 /* The iteration circled back, all is done. */
1552 goto out_locked;
1553
1554 if (!rb_check_links(cpu_buffer, tmp))
1555 goto out_locked;
1556
1557 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1558 }
1559
1560 out_locked:
1561 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1562 }
regards,
dan carpenter