From: Nikhil Agarwal <[email protected]>

The existing implementation uses atomic queues to update the sequence
number. In this particular case, atomic queues give no advantage over an
atomic variable based implementation. Atomic queues have too much overhead
for such a short critical section.
The incoming queues are already atomic/ordered queues. When a CPU has
finished the first stage of processing and needs a sequence number, it can
allocate one atomically. Since the packets come from ordered queues, packet
ordering is maintained; there is no need to use atomic queues again just
for the sequence number update, which would be overkill. After the sequence
number update, the packets are submitted to SEC for the crypto operation
atomically. Ordering is the same whether the sequence number is updated
through an atomic queue or incremented atomically.

Signed-off-by: Nikhil Agarwal <[email protected]>
---
 example/ipsec/odp_ipsec.c       |   13 +++++++------
 example/ipsec/odp_ipsec_cache.c |    4 ++--
 example/ipsec/odp_ipsec_cache.h |    4 ++--
 3 files changed, 11 insertions(+), 10 deletions(-)

diff --git a/example/ipsec/odp_ipsec.c b/example/ipsec/odp_ipsec.c
index cb8f535..1f5bd46 100644
--- a/example/ipsec/odp_ipsec.c
+++ b/example/ipsec/odp_ipsec.c
@@ -140,8 +140,8 @@ typedef struct {
 
 	/* Output only */
 	odp_crypto_op_params_t params;  /**< Parameters for crypto call */
-	uint32_t *ah_seq;               /**< AH sequence number location */
-	uint32_t *esp_seq;              /**< ESP sequence number location */
+	odp_atomic_u32_t *ah_seq;       /**< AH sequence number location */
+	odp_atomic_u32_t *esp_seq;      /**< ESP sequence number location */
 } ipsec_ctx_t;
 
 /**
@@ -881,7 +881,7 @@ pkt_disposition_e do_ipsec_out_classify(odp_packet_t pkt,
 
 	*skip = FALSE;
 
-	return PKT_POSTED;
+	return PKT_CONTINUE;
 }
 
 /**
@@ -907,13 +907,15 @@ pkt_disposition_e do_ipsec_out_seq(odp_packet_t pkt,
 		odph_ahhdr_t *ah;
 
 		ah = (odph_ahhdr_t *)(ctx->ipsec.ah_offset + buf);
-		ah->seq_no = odp_cpu_to_be_32((*ctx->ipsec.ah_seq)++);
+		ah->seq_no = odp_cpu_to_be_32
+			(odp_atomic_fetch_inc_u32(ctx->ipsec.ah_seq));
 	}
 	if (ctx->ipsec.esp_offset) {
 		odph_esphdr_t *esp;
 
 		esp = (odph_esphdr_t *)(ctx->ipsec.esp_offset + buf);
-		esp->seq_no = odp_cpu_to_be_32((*ctx->ipsec.esp_seq)++);
+		esp->seq_no = odp_cpu_to_be_32
+			(odp_atomic_fetch_inc_u32(ctx->ipsec.esp_seq));
 	}
 
 	/* Issue crypto request */
@@ -1078,7 +1080,6 @@ void *pktio_thread(void *arg EXAMPLE_UNUSED)
 				ctx->state = PKT_STATE_TRANSMIT;
 			} else {
 				ctx->state = PKT_STATE_IPSEC_OUT_SEQ;
-				odp_queue_enq(seqnumq, ev);
 			}
 			break;
 
diff --git a/example/ipsec/odp_ipsec_cache.c b/example/ipsec/odp_ipsec_cache.c
index 12b960d..8bad122 100644
--- a/example/ipsec/odp_ipsec_cache.c
+++ b/example/ipsec/odp_ipsec_cache.c
@@ -129,8 +129,8 @@ int create_ipsec_cache_entry(sa_db_entry_t *cipher_sa,
 	}
 
 	/* Initialize state */
-	entry->state.esp_seq = 0;
-	entry->state.ah_seq = 0;
+	odp_atomic_init_u32(&entry->state.esp_seq, 0);
+	odp_atomic_init_u32(&entry->state.ah_seq, 0);
 	entry->state.session = session;
 
 	/* Add entry to the appropriate list */
diff --git a/example/ipsec/odp_ipsec_cache.h b/example/ipsec/odp_ipsec_cache.h
index 714cae8..a25e3e8 100644
--- a/example/ipsec/odp_ipsec_cache.h
+++ b/example/ipsec/odp_ipsec_cache.h
@@ -51,8 +51,8 @@ typedef struct ipsec_cache_entry_s {
 	/* Per SA state */
 	struct {
 		odp_crypto_session_t session;  /**< Crypto session handle */
-		uint32_t esp_seq;              /**< ESP TX sequence number */
-		uint32_t ah_seq;               /**< AH TX sequence number */
+		odp_atomic_u32_t esp_seq;      /**< ESP TX sequence number */
+		odp_atomic_u32_t ah_seq;       /**< AH TX sequence number */
 		uint8_t iv[MAX_IV_LEN];        /**< ESP IV storage */
 	} state;
 } ipsec_cache_entry_t;
-- 
1.7.9.5

_______________________________________________
lng-odp mailing list
[email protected]
https://lists.linaro.org/mailman/listinfo/lng-odp
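P.S. For readers of the thread, below is a minimal standalone sketch of the
lock-free sequence number allocation pattern the patch switches to. It
assumes ODP's atomic API (odp_atomic_u32_t, odp_atomic_init_u32(),
odp_atomic_fetch_inc_u32()) and odp_cpu_to_be_32(); the sa_seq_state_t type
and the helper function names are illustrative only and are not part of the
patch.

/*
 * Sketch: per-SA sequence numbers shared by all worker threads, allocated
 * with an atomic fetch-and-increment instead of an atomic queue.
 */
#include <odp.h>	/* umbrella ODP header of this era */

/* Per-SA transmit state: one shared counter per sequence number space */
typedef struct {
	odp_atomic_u32_t esp_seq;	/* ESP TX sequence number */
	odp_atomic_u32_t ah_seq;	/* AH TX sequence number */
} sa_seq_state_t;

static void sa_seq_init(sa_seq_state_t *state)
{
	/* Counters start at 0, matching the example's existing behaviour */
	odp_atomic_init_u32(&state->esp_seq, 0);
	odp_atomic_init_u32(&state->ah_seq, 0);
}

/*
 * Safe to call concurrently from any worker: fetch-and-increment hands out
 * a unique sequence number, converted here to network byte order for the
 * ESP header.
 */
static uint32_t sa_next_esp_seq(sa_seq_state_t *state)
{
	return odp_cpu_to_be_32(odp_atomic_fetch_inc_u32(&state->esp_seq));
}

Because fetch-and-increment is a single atomic read-modify-write, each
worker gets a distinct sequence number without a queue round trip, which is
why the odp_queue_enq(seqnumq, ev) step can be dropped from pktio_thread().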
