In a few places the list sizes are checked without holding the lock.
While this probably speeds up the processing a bit, it's technically
incorrect as the access is not atomic. Caught by thread-safety
analyzer of clang 21, as it now properly supports GUARDED annotations
on structure fields:
lib/ipf.c:1117:33:
error: reading the value pointed to by 'frag_exp_list' requires
holding any mutex [-Werror,-Wthread-safety-analysis]
1117 | if (ovs_list_is_empty(&ipf->frag_exp_list)) {
| ^
lib/ipf.c:1154:33:
error: reading the value pointed to by 'reassembled_pkt_list'
requires holding any mutex [-Werror,-Wthread-safety-analysis]
1154 | if (ovs_list_is_empty(&ipf->reassembled_pkt_list)) {
| ^
Not trying to fix the actual non-atomic access to the lists: getting
a wrong result in these particular calls will not meaningfully impact
the correctness of the code, while taking the lock before every check
would likely reduce performance.
For now, just working around the problem by turning off thread safety
analysis for these particular calls. The new _unsafe() access function
also has the benefit of making it clearer that these calls should not
be relied upon for logic that requires precision.
Signed-off-by: Ilya Maximets <[email protected]>
---
Version 2:
* Split the patch and changed the approach from taking a lock to
just working around the warnings.
Version 1:
*
https://patchwork.ozlabs.org/project/openvswitch/patch/[email protected]/
lib/ipf.c | 28 ++++++++++++++++++++++------
1 file changed, 22 insertions(+), 6 deletions(-)
diff --git a/lib/ipf.c b/lib/ipf.c
index b16797312..d72b49563 100644
--- a/lib/ipf.c
+++ b/lib/ipf.c
@@ -302,6 +302,22 @@ ipf_reassembled_list_remove(struct reassembled_pkt *rp)
ovs_list_remove(&rp->rp_list_node);
}
+/* Normally, access to ipf lists requires holding ipf_lock. This is a special
+ * function to work around this restriction. As a consequence of not taking
+ * any locks, the result of this function can be wrong. Must only be used in
+ * cases where the wrong result doesn't impact the overall correctness of the
+ * logic, e.g., a quick check if there is any work to be done on current
+ * iteration that otherwise would block all other threads by taking a lock.
+ *
+ * XXX: We need a proper thread-safe solution for this instead.
+ */
+static bool
+ipf_list_is_empty_unsafe(struct ovs_list *list)
+ OVS_NO_THREAD_SAFETY_ANALYSIS
+{
+ return ovs_list_is_empty(list);
+}
+
/* Symmetric */
static uint32_t
ipf_list_key_hash(const struct ipf_list_key *key, uint32_t basis)
@@ -1071,7 +1087,7 @@ ipf_send_completed_frags(struct ipf *ipf, struct
dp_packet_batch *pb,
long long now, bool v6, uint16_t zone,
odp_port_t in_port)
{
- if (ovs_list_is_empty(&ipf->frag_complete_list)) {
+ if (ipf_list_is_empty_unsafe(&ipf->frag_complete_list)) {
return;
}
@@ -1114,7 +1130,7 @@ ipf_delete_expired_frags(struct ipf *ipf, long long now)
};
- if (ovs_list_is_empty(&ipf->frag_exp_list)) {
+ if (ipf_list_is_empty_unsafe(&ipf->frag_exp_list)) {
return;
}
@@ -1151,7 +1167,7 @@ static void
ipf_execute_reass_pkts(struct ipf *ipf, struct dp_packet_batch *pb,
ovs_be16 dl_type)
{
- if (ovs_list_is_empty(&ipf->reassembled_pkt_list)) {
+ if (ipf_list_is_empty_unsafe(&ipf->reassembled_pkt_list)) {
return;
}
@@ -1175,7 +1191,7 @@ static void
ipf_post_execute_reass_pkts(struct ipf *ipf,
struct dp_packet_batch *pb, bool v6)
{
- if (ovs_list_is_empty(&ipf->reassembled_pkt_list)) {
+ if (ipf_list_is_empty_unsafe(&ipf->reassembled_pkt_list)) {
return;
}
@@ -1308,8 +1324,8 @@ ipf_clean_thread_main(void *f)
long long now = time_msec();
- if (!ovs_list_is_empty(&ipf->frag_exp_list) ||
- !ovs_list_is_empty(&ipf->frag_complete_list)) {
+ if (!ipf_list_is_empty_unsafe(&ipf->frag_exp_list) ||
+ !ipf_list_is_empty_unsafe(&ipf->frag_complete_list)) {
ovs_mutex_lock(&ipf->ipf_lock);
--
2.51.0
_______________________________________________
dev mailing list
[email protected]
https://mail.openvswitch.org/mailman/listinfo/ovs-dev