On 06/06/14 at 02:37pm, Andy Zhou wrote:
> +static void tbl_mask_array_delete_mask(struct mask_array *ma,
> + const struct sw_flow_mask *mask)
> +{
> + int i = 0;
> +
> + /* Delete a mask pointer from the valid section.
> + *
> + * Also move the last entry in its place, so there is no
> + * hole in the valid section.
> + *
> + * Notice the last entry still points to the original mask.
> + *
> + * <Note>: there is a small race window that may cause a mask
> + * to be missed in a search. Imagine a core walking through the
> + * array that has passed the index of the mask being deleted.
> + * But before it reaches the last entry, that entry is overwritten
> + * by another core that is adding a new mask, so the last entry
> + * now points to the new mask. In this case, the moved-up last
> + * entry can be missed by the core walking the mask array.
> + *
> + * If this missed mask would have led to a successful
> + * lookup, hitting the race window could cause a packet to miss
> + * the kernel flow cache and be sent to user space.
> + * </Note>
> + */
> + while (i < ma->count - 1) {
I think this should be coded as a for () loop, with the increment in
the loop header instead of in the else branch.
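Roughly something like this (just a sketch of the same logic, not
tested):

	for (i = 0; i < ma->count - 1; i++) {
		if (mask == ma->masks[i]) {
			struct sw_flow_mask *last;

			/* Move the last valid entry into the freed slot. */
			last = ma->masks[ma->count - 1];
			rcu_assign_pointer(ma->masks[i], last);
			ma->count--;
			break;
		}
	}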
> + if (mask == ma->masks[i]) {
> + struct sw_flow_mask *last;
> +
> + last = ma->masks[ma->count - 1];
> + rcu_assign_pointer(ma->masks[i], last);
> + ma->count--;
Since the loop body is only entered when count > 1, deleting the last
remaining flow mask never decrements the count: the second loop NULLs
the pointer, but count stays at 1.
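One possible shape for the fix (untested sketch): run the loop over the
whole valid section, so the count is decremented even when the deleted
mask is the last entry; moving the last entry onto itself is harmless
in that case.

	for (i = 0; i < ma->count; i++) {
		if (mask == ma->masks[i]) {
			struct sw_flow_mask *last;

			last = ma->masks[ma->count - 1];
			rcu_assign_pointer(ma->masks[i], last);
			ma->count--;
			break;
		}
	}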
> + break;
> + } else
> + i++;
> + }
> +
> + /* Remove the deleted mask pointers from the invalid section. */
> + for (; i < ma->max; i++)
> + if (mask == ma->masks[i])
> + RCU_INIT_POINTER(ma->masks[i], NULL);
> +}
> +
> +static int tbl_mask_array_find_idx(struct mask_array *ma,
> + const struct sw_flow_mask *mask)
Looks like this helper should have been part of the first patch.
> +{
> + int i;
> +
> + for (i = 0; i < ma->count; i++)
> + if (mask == ovsl_dereference(ma->masks[i]))
> + return i;
> +
> + return -1;
> +}
> +
> int ovs_flow_tbl_init(struct flow_table *table)
> {
> struct table_instance *ti;
> @@ -524,7 +579,7 @@ static struct sw_flow *flow_lookup(struct flow_table *tbl,
> struct sw_flow *flow;
> int i;
>
> - for (i = 0; i < ma->max; i++) {
> + for (i = 0; i < ma->count; i++) {
> struct sw_flow_mask *mask;
>
> mask = rcu_dereference_ovsl(ma->masks[i]);
> @@ -578,10 +633,21 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *tbl,
>
> e = &entries[index];
> if (e->skb_hash == skb_hash) {
> - cache = rcu_dereference_ovsl(ma->masks[e->mask_index]);
> - if (cache)
> - if (tbl_mask_array_find_idx(ma, cache) < 0)
> + int i = e->mask_index;
> +
> + if (i < ma->max)
> + cache = rcu_dereference_ovsl(ma->masks[i]);
> +
> + /* If the cache index is outside of the valid
> + * region, update the index in case the cache entry
> + * was moved up. */
> + if (cache && i >= ma->count) {
How about adding an unlikely() here since this is in the super fast
path but unlikely to be true?
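I.e. something along these lines (hypothetical, just to illustrate the
branch hint):

	if (cache && unlikely(i >= ma->count)) {
		i = tbl_mask_array_find_idx(ma, cache);
		if (i < 0)
			cache = NULL;
		else
			e->mask_index = i;
	}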
> + i = tbl_mask_array_find_idx(ma, cache);
> + if (i < 0)
> cache = NULL;
> + else
> + e->mask_index = i;
> + }
>
> if (!cache)
> e->skb_hash = 0; /* Not a valid cache entry. */