Re: [devel] [PATCH 1/1] mds: support multicast fragmented messages [#3033]
Hi bro.Vu, Thanks for comments. Seems they are not really matter change or not, can we keep current patch? Then we don't need push another PS for adaption patch in our repo. Best Regards, ThuanTr -Original Message- From: Vu Minh Nguyen Sent: Thursday, April 25, 2019 1:51 PM To: 'thuan.tran' ; hans.nordeb...@ericsson.com; 'Minh Hon Chau' Cc: opensaf-devel@lists.sourceforge.net Subject: RE: [PATCH 1/1] mds: support multicast fragmented messages [#3033] Hi Thuan, Ack with minor comments below. Regards, Vu > -Original Message- > From: thuan.tran > Sent: Wednesday, April 24, 2019 6:06 PM > To: 'Vu Minh Nguyen' ; > hans.nordeb...@ericsson.com; Minh Hon Chau > Cc: opensaf-devel@lists.sourceforge.net; thuan.tran > > Subject: [PATCH 1/1] mds: support multicast fragmented messages > [#3033] > > - Sender may send broadcast big messages (> 65K) then small messages > (< 65K). > Current MDS just loop via all destinations to unicast all fragmented messages > to one by one destinations. But sending multicast non-fragment > messages to all destinations. Therefor, receivers may get messages > with incorrect order, non-fragment messages may come before fragmented > messages. > For example, it may lead to OUT OF ORDER for IMMNDs during IMMD sync. > - Solution: support send multicast each fragmented messages to avoid > disorder of arrived broadcast messages. 
> --- > src/mds/mds_c_sndrcv.c | 3 +- > src/mds/mds_dt_tipc.c | 104 +++ > -- > 2 files changed, 40 insertions(+), 67 deletions(-) > > diff --git a/src/mds/mds_c_sndrcv.c b/src/mds/mds_c_sndrcv.c index > 703bc8e..7850ac7 100644 > --- a/src/mds/mds_c_sndrcv.c > +++ b/src/mds/mds_c_sndrcv.c > @@ -4496,8 +4496,7 @@ static uint32_t > mcm_pvt_process_svc_bcast_common( > info_result->key.vdest_id, req, 0, > info_result->key.adest, pri); > if ((svc_cb->subtn_info->prev_ver_sub_count == 0) && > - (tipc_mode_enabled) && (tipc_mcast_enabled) && > - (to_msg.bcast_buff_len < MDS_DIRECT_BUF_MAXSIZE)) { > + (tipc_mode_enabled) && (tipc_mcast_enabled)) { > m_MDS_LOG_DBG( > "MDTM: Break while(1) prev_ver_sub_count: %d svc_id =%s(%d) > to_msg.bcast_buff_len: %d ", > svc_cb->subtn_info->prev_ver_sub_count, > diff --git a/src/mds/mds_dt_tipc.c b/src/mds/mds_dt_tipc.c index > a3abff5..d8f8c78 100644 > --- a/src/mds/mds_dt_tipc.c > +++ b/src/mds/mds_dt_tipc.c > @@ -2856,6 +2856,7 @@ uint32_t mdtm_frag_and_send(MDTM_SEND_REQ *req, > uint32_t seq_num, > uint16_t frag_val = 0; > uint32_t sum_mds_hdr_plus_mdtm_hdr_plus_len; > int version = req->msg_arch_word & 0x7; > + uint32_t ret = NCSCC_RC_SUCCESS; > > if (version > 1) { > sum_mds_hdr_plus_mdtm_hdr_plus_len = @@ -2914,95 +2915,66 @@ > uint32_t mdtm_frag_and_send(MDTM_SEND_REQ *req, uint32_t seq_num, > frag_val = NO_FRAG_BIT | i; > } > { > + uint32_t hdr_plus = (i == 1) ? > + sum_mds_hdr_plus_mdtm_hdr_plus_len : > MDTM_FRAG_HDR_PLUS_LEN_2; > uint8_t *body = NULL; > body = calloc(1, len_buf); > + p8 = (uint8_t *)m_MMGR_DATA_AT_START(usrbuf, > len_buf - hdr_plus, > + (char *)(body + hdr_plus)); > + if (p8 != (body + hdr_plus)) > + memcpy((body + hdr_plus), p8, len_buf - > hdr_plus); > if (i == 1) { [Vu] is it better if we combine these 02 if into one, as: If ((i === 1) && NCSCC_RC_SUCCESS != mdtm_add_mds_hdr(body, req)) { // error handling. } [Thuan] I think it is not really a matter, it's same. 
> - p8 = (uint8_t *)m_MMGR_DATA_AT_START( > - usrbuf, > - (len_buf - > - sum_mds_hdr_plus_mdtm_hdr_plus_len), > - (char > - *)(body + > - > sum_mds_hdr_plus_mdtm_hdr_plus_len)); > - > - if (p8 != > - (body + > sum_mds_hdr_plus_mdtm_hdr_plus_len)) > - memcpy( > - (body + > - > sum_mds_hdr_plus_mdtm_hdr_plus_len), > - p8, > - (len_buf - > - > sum_mds_hdr_plus_mdtm_hdr_plus_len)); > - > if (NCSCC_RC_SUCCESS != > mdtm_add_mds_hdr(body, req)) { > m_MDS_LOG_ERR( > "MDTM: frg MDS hdr addition > failed\n"); > -
Re: [devel] [PATCH 1/1] mds: support multicast fragmented messages [#3033]
Hi Thuan, Ack with minor comments below. Regards, Vu > -Original Message- > From: thuan.tran > Sent: Wednesday, April 24, 2019 6:06 PM > To: 'Vu Minh Nguyen' ; > hans.nordeb...@ericsson.com; Minh Hon Chau > > Cc: opensaf-devel@lists.sourceforge.net; thuan.tran > > Subject: [PATCH 1/1] mds: support multicast fragmented messages [#3033] > > - Sender may send broadcast big messages (> 65K) then small messages (< > 65K). > Current MDS just loop via all destinations to unicast all fragmented messages > to one by one destinations. But sending multicast non-fragment messages to > all > destinations. Therefor, receivers may get messages with incorrect order, > non-fragment messages may come before fragmented messages. > For example, it may lead to OUT OF ORDER for IMMNDs during IMMD sync. > - Solution: support send multicast each fragmented messages to avoid > disorder of arrived broadcast messages. > --- > src/mds/mds_c_sndrcv.c | 3 +- > src/mds/mds_dt_tipc.c | 104 +++ > -- > 2 files changed, 40 insertions(+), 67 deletions(-) > > diff --git a/src/mds/mds_c_sndrcv.c b/src/mds/mds_c_sndrcv.c > index 703bc8e..7850ac7 100644 > --- a/src/mds/mds_c_sndrcv.c > +++ b/src/mds/mds_c_sndrcv.c > @@ -4496,8 +4496,7 @@ static uint32_t > mcm_pvt_process_svc_bcast_common( > info_result->key.vdest_id, req, 0, > info_result->key.adest, pri); > if ((svc_cb->subtn_info->prev_ver_sub_count == 0) && > - (tipc_mode_enabled) && (tipc_mcast_enabled) && > - (to_msg.bcast_buff_len < MDS_DIRECT_BUF_MAXSIZE)) { > + (tipc_mode_enabled) && (tipc_mcast_enabled)) { > m_MDS_LOG_DBG( > "MDTM: Break while(1) prev_ver_sub_count: %d > svc_id =%s(%d) to_msg.bcast_buff_len: %d ", > svc_cb->subtn_info->prev_ver_sub_count, > diff --git a/src/mds/mds_dt_tipc.c b/src/mds/mds_dt_tipc.c > index a3abff5..d8f8c78 100644 > --- a/src/mds/mds_dt_tipc.c > +++ b/src/mds/mds_dt_tipc.c > @@ -2856,6 +2856,7 @@ uint32_t mdtm_frag_and_send(MDTM_SEND_REQ > *req, uint32_t seq_num, > uint16_t frag_val = 0; > uint32_t 
sum_mds_hdr_plus_mdtm_hdr_plus_len; > int version = req->msg_arch_word & 0x7; > + uint32_t ret = NCSCC_RC_SUCCESS; > > if (version > 1) { > sum_mds_hdr_plus_mdtm_hdr_plus_len = > @@ -2914,95 +2915,66 @@ uint32_t > mdtm_frag_and_send(MDTM_SEND_REQ *req, uint32_t seq_num, > frag_val = NO_FRAG_BIT | i; > } > { > + uint32_t hdr_plus = (i == 1) ? > + sum_mds_hdr_plus_mdtm_hdr_plus_len : > MDTM_FRAG_HDR_PLUS_LEN_2; > uint8_t *body = NULL; > body = calloc(1, len_buf); > + p8 = (uint8_t *)m_MMGR_DATA_AT_START(usrbuf, > len_buf - hdr_plus, > + (char *)(body + hdr_plus)); > + if (p8 != (body + hdr_plus)) > + memcpy((body + hdr_plus), p8, len_buf - > hdr_plus); > if (i == 1) { [Vu] is it better if we combine these 02 if into one, as: If ((i === 1) && NCSCC_RC_SUCCESS != mdtm_add_mds_hdr(body, req)) { // error handling. } > - p8 = (uint8_t *)m_MMGR_DATA_AT_START( > - usrbuf, > - (len_buf - > - sum_mds_hdr_plus_mdtm_hdr_plus_len), > - (char > - *)(body + > - > sum_mds_hdr_plus_mdtm_hdr_plus_len)); > - > - if (p8 != > - (body + > sum_mds_hdr_plus_mdtm_hdr_plus_len)) > - memcpy( > - (body + > - > sum_mds_hdr_plus_mdtm_hdr_plus_len), > - p8, > - (len_buf - > - > sum_mds_hdr_plus_mdtm_hdr_plus_len)); > - > if (NCSCC_RC_SUCCESS != > mdtm_add_mds_hdr(body, req)) { > m_MDS_LOG_ERR( > "MDTM: frg MDS hdr addition > failed\n"); > - free(body); > - m_MMGR_FREE_BUFR_LIST(usrbuf); > - return NCSCC_RC_FAILURE; > - } > - > - if (NCSCC_RC_SUCCESS != > - mdtm_add_frag_hdr(body, len_buf, seq_num, > - frag_val)) { > - m_MDS_LOG_ERR( > - "MDTM: Frag
[devel] [PATCH 1/1] mds: support multicast fragmented messages [#3033]
- Sender may send broadcast big messages (> 65K) then small messages (< 65K). Current MDS just loop via all destinations to unicast all fragmented messages to one by one destinations. But sending multicast non-fragment messages to all destinations. Therefore, receivers may get messages with incorrect order, non-fragment messages may come before fragmented messages. For example, it may lead to OUT OF ORDER for IMMNDs during IMMD sync. - Solution: support send multicast each fragmented messages to avoid disorder of arrived broadcast messages. --- src/mds/mds_c_sndrcv.c | 3 +- src/mds/mds_dt_tipc.c | 104 +++-- 2 files changed, 40 insertions(+), 67 deletions(-) diff --git a/src/mds/mds_c_sndrcv.c b/src/mds/mds_c_sndrcv.c index 703bc8e..7850ac7 100644 --- a/src/mds/mds_c_sndrcv.c +++ b/src/mds/mds_c_sndrcv.c @@ -4496,8 +4496,7 @@ static uint32_t mcm_pvt_process_svc_bcast_common( info_result->key.vdest_id, req, 0, info_result->key.adest, pri); if ((svc_cb->subtn_info->prev_ver_sub_count == 0) && - (tipc_mode_enabled) && (tipc_mcast_enabled) && - (to_msg.bcast_buff_len < MDS_DIRECT_BUF_MAXSIZE)) { + (tipc_mode_enabled) && (tipc_mcast_enabled)) { m_MDS_LOG_DBG( "MDTM: Break while(1) prev_ver_sub_count: %d svc_id =%s(%d) to_msg.bcast_buff_len: %d ", svc_cb->subtn_info->prev_ver_sub_count, diff --git a/src/mds/mds_dt_tipc.c b/src/mds/mds_dt_tipc.c index a3abff5..d8f8c78 100644 --- a/src/mds/mds_dt_tipc.c +++ b/src/mds/mds_dt_tipc.c @@ -2856,6 +2856,7 @@ uint32_t mdtm_frag_and_send(MDTM_SEND_REQ *req, uint32_t seq_num, uint16_t frag_val = 0; uint32_t sum_mds_hdr_plus_mdtm_hdr_plus_len; int version = req->msg_arch_word & 0x7; + uint32_t ret = NCSCC_RC_SUCCESS; if (version > 1) { sum_mds_hdr_plus_mdtm_hdr_plus_len = @@ -2914,95 +2915,66 @@ uint32_t mdtm_frag_and_send(MDTM_SEND_REQ *req, uint32_t seq_num, frag_val = NO_FRAG_BIT | i; } { + uint32_t hdr_plus = (i == 1) ? 
+ sum_mds_hdr_plus_mdtm_hdr_plus_len : MDTM_FRAG_HDR_PLUS_LEN_2; uint8_t *body = NULL; body = calloc(1, len_buf); + p8 = (uint8_t *)m_MMGR_DATA_AT_START(usrbuf, len_buf - hdr_plus, + (char *)(body + hdr_plus)); + if (p8 != (body + hdr_plus)) + memcpy((body + hdr_plus), p8, len_buf - hdr_plus); if (i == 1) { - p8 = (uint8_t *)m_MMGR_DATA_AT_START( - usrbuf, - (len_buf - -sum_mds_hdr_plus_mdtm_hdr_plus_len), - (char -*)(body + - sum_mds_hdr_plus_mdtm_hdr_plus_len)); - - if (p8 != - (body + sum_mds_hdr_plus_mdtm_hdr_plus_len)) - memcpy( - (body + - sum_mds_hdr_plus_mdtm_hdr_plus_len), - p8, - (len_buf - - sum_mds_hdr_plus_mdtm_hdr_plus_len)); - if (NCSCC_RC_SUCCESS != mdtm_add_mds_hdr(body, req)) { m_MDS_LOG_ERR( "MDTM: frg MDS hdr addition failed\n"); - free(body); - m_MMGR_FREE_BUFR_LIST(usrbuf); - return NCSCC_RC_FAILURE; - } - - if (NCSCC_RC_SUCCESS != - mdtm_add_frag_hdr(body, len_buf, seq_num, - frag_val)) { - m_MDS_LOG_ERR( - "MDTM: Frag hdr addition failed\n"); m_MMGR_FREE_BUFR_LIST(usrbuf); free(body); return NCSCC_RC_FAILURE; } + } + if (NCSCC_RC_SUCCESS != + mdtm_add_frag_hdr(body, len_buf, seq_num, + frag_val)) { +
Re: [devel] [PATCH 1/1] mds: support multicast fragmented messages [#3033]
Hi bro.Vu, OK, you are right. It should call m_MMGR_FREE_BUFR_LIST(usrbuf); before break I will send out V2. Best Regards, ThuanTr -Original Message- From: Vu Minh Nguyen Sent: Wednesday, April 24, 2019 2:30 PM To: 'Tran Thuan' ; hans.nordeb...@ericsson.com; 'Minh Hon Chau' Cc: opensaf-devel@lists.sourceforge.net Subject: RE: [PATCH 1/1] mds: support multicast fragmented messages [#3033] Hi Thuan, m_MMGR_REMOVE_FROM_START() is just removed n bytes from the start, not whole memory chain. You can refer to descriptions of these macros to see the differences b/w them. Regards, Vu > -Original Message- > From: Tran Thuan > Sent: Wednesday, April 24, 2019 2:17 PM > To: 'Vu Minh Nguyen' ; > hans.nordeb...@ericsson.com; 'Minh Hon Chau' c...@users.sourceforge.net> > Cc: opensaf-devel@lists.sourceforge.net > Subject: RE: [PATCH 1/1] mds: support multicast fragmented messages > [#3033] > > Hi bro.Vu, > > I think it won't cause memleak since I break after > m_MMGR_REMOVE_FROM_START() > free(body) > > Best Regards, > ThuanTr > > -Original Message- > From: Vu Minh Nguyen > Sent: Wednesday, April 24, 2019 1:45 PM > To: 'thuan.tran' ; > hans.nordeb...@ericsson.com; 'Minh Hon Chau' > > Cc: opensaf-devel@lists.sourceforge.net > Subject: RE: [PATCH 1/1] mds: support multicast fragmented messages > [#3033] > > Hi Thuan, > > Tested broadcasting with large packages (each 1M). > > Ack with below comments. > > Regards, Vu > > > -Original Message- > > From: thuan.tran > > Sent: Wednesday, April 24, 2019 12:00 PM > > To: hans.nordeb...@ericsson.com; Vu Minh Nguyen > > ; Minh Hon Chau > c...@users.sourceforge.net> > > Cc: opensaf-devel@lists.sourceforge.net; thuan.tran > > > > Subject: [PATCH 1/1] mds: support multicast fragmented messages > > [#3033] > > > > - Sender may send broadcast big messages (> 65K) then small messages > > (< 65K). > > Current MDS just loop via all destinations to unicast all fragmented > messages > > to one by one destinations. 
But sending multicast non-fragment > > messages to all destinations. Therefor, receivers may get messages > > with incorrect order, non-fragment messages may come before > fragmented > > messages. > > For example, it may lead to OUT OF ORDER for IMMNDs during IMMD sync. > > - Solution: support send multicast each fragmented messages to avoid > > disorder of arrived broadcast messages. > > --- > > src/mds/mds_c_sndrcv.c | 3 +-- > > src/mds/mds_dt_tipc.c | 55 > > -- > > 2 files changed, 45 insertions(+), 13 deletions(-) > > > > diff --git a/src/mds/mds_c_sndrcv.c b/src/mds/mds_c_sndrcv.c index > > 703bc8e..7850ac7 100644 > > --- a/src/mds/mds_c_sndrcv.c > > +++ b/src/mds/mds_c_sndrcv.c > > @@ -4496,8 +4496,7 @@ static uint32_t > > mcm_pvt_process_svc_bcast_common( > > info_result->key.vdest_id, > req, 0, > > info_result->key.adest, pri); > > if ((svc_cb->subtn_info->prev_ver_sub_count == 0) && > > - (tipc_mode_enabled) && (tipc_mcast_enabled) && > > - (to_msg.bcast_buff_len < MDS_DIRECT_BUF_MAXSIZE)) { > > + (tipc_mode_enabled) && (tipc_mcast_enabled)) { > > m_MDS_LOG_DBG( > > "MDTM: Break while(1) prev_ver_sub_count: %d > svc_id =%s(%d) > > to_msg.bcast_buff_len: %d ", > > svc_cb->subtn_info->prev_ver_sub_count, > > diff --git a/src/mds/mds_dt_tipc.c b/src/mds/mds_dt_tipc.c index > > a3abff5..656f79c 100644 > > --- a/src/mds/mds_dt_tipc.c > > +++ b/src/mds/mds_dt_tipc.c > > @@ -2856,6 +2856,7 @@ uint32_t > mdtm_frag_and_send(MDTM_SEND_REQ *req, > > uint32_t seq_num, > > uint16_t frag_val = 0; > > uint32_t sum_mds_hdr_plus_mdtm_hdr_plus_len; > > int version = req->msg_arch_word & 0x7; > > + uint32_t ret = NCSCC_RC_SUCCESS; > > > > if (version > 1) { > > sum_mds_hdr_plus_mdtm_hdr_plus_len = @@ -2952,11 > +2953,23 @@ > > > uint32_t mdtm_frag_and_send(MDTM_SEND_REQ *req, uint32_t seq_num, > > free(body); > > return NCSCC_RC_FAILURE; > > } > > - m_MDS_LOG_DBG( > > - "MDTM:Sending message with Service > > Seqno=%d, Fragment Seqnum=%d, frag_num=%d, TO > > 
Dest_Tipc_id=<0x%08x:%u>", > > - req->svc_seq_num, seq_num, frag_val, > > - id.node, id.ref); > > - mdtm_sendto(body, len_buf, id); > > + if (((req->snd_type == MDS_SENDTYPE_RBCAST) > > || > > +(req->snd_type == MDS_SENDTYPE_BCAST)) > > && > > + (version > 0) && (tipc_mcast_enabled)) { > > + m_MDS_LOG_DBG( > > +
Re: [devel] [PATCH 1/1] mds: support multicast fragmented messages [#3033]
Hi Thuan, m_MMGR_REMOVE_FROM_START() is just removed n bytes from the start, not whole memory chain. You can refer to descriptions of these macros to see the differences b/w them. Regards, Vu > -Original Message- > From: Tran Thuan > Sent: Wednesday, April 24, 2019 2:17 PM > To: 'Vu Minh Nguyen' ; > hans.nordeb...@ericsson.com; 'Minh Hon Chau' c...@users.sourceforge.net> > Cc: opensaf-devel@lists.sourceforge.net > Subject: RE: [PATCH 1/1] mds: support multicast fragmented messages > [#3033] > > Hi bro.Vu, > > I think it won't cause memleak since I break after > m_MMGR_REMOVE_FROM_START() > free(body) > > Best Regards, > ThuanTr > > -Original Message- > From: Vu Minh Nguyen > Sent: Wednesday, April 24, 2019 1:45 PM > To: 'thuan.tran' ; > hans.nordeb...@ericsson.com; > 'Minh Hon Chau' > Cc: opensaf-devel@lists.sourceforge.net > Subject: RE: [PATCH 1/1] mds: support multicast fragmented messages > [#3033] > > Hi Thuan, > > Tested broadcasting with large packages (each 1M). > > Ack with below comments. > > Regards, Vu > > > -Original Message- > > From: thuan.tran > > Sent: Wednesday, April 24, 2019 12:00 PM > > To: hans.nordeb...@ericsson.com; Vu Minh Nguyen > > ; Minh Hon Chau > c...@users.sourceforge.net> > > Cc: opensaf-devel@lists.sourceforge.net; thuan.tran > > > > Subject: [PATCH 1/1] mds: support multicast fragmented messages > > [#3033] > > > > - Sender may send broadcast big messages (> 65K) then small messages > > (< 65K). > > Current MDS just loop via all destinations to unicast all fragmented > messages > > to one by one destinations. But sending multicast non-fragment > > messages to all destinations. Therefor, receivers may get messages > > with incorrect order, non-fragment messages may come before > fragmented > > messages. > > For example, it may lead to OUT OF ORDER for IMMNDs during IMMD sync. > > - Solution: support send multicast each fragmented messages to avoid > > disorder of arrived broadcast messages. 
> > --- > > src/mds/mds_c_sndrcv.c | 3 +-- > > src/mds/mds_dt_tipc.c | 55 > > -- > > 2 files changed, 45 insertions(+), 13 deletions(-) > > > > diff --git a/src/mds/mds_c_sndrcv.c b/src/mds/mds_c_sndrcv.c index > > 703bc8e..7850ac7 100644 > > --- a/src/mds/mds_c_sndrcv.c > > +++ b/src/mds/mds_c_sndrcv.c > > @@ -4496,8 +4496,7 @@ static uint32_t > > mcm_pvt_process_svc_bcast_common( > > info_result->key.vdest_id, > req, 0, > > info_result->key.adest, pri); > > if ((svc_cb->subtn_info->prev_ver_sub_count == 0) && > > - (tipc_mode_enabled) && (tipc_mcast_enabled) && > > - (to_msg.bcast_buff_len < MDS_DIRECT_BUF_MAXSIZE)) { > > + (tipc_mode_enabled) && (tipc_mcast_enabled)) { > > m_MDS_LOG_DBG( > > "MDTM: Break while(1) prev_ver_sub_count: %d > svc_id =%s(%d) > > to_msg.bcast_buff_len: %d ", > > svc_cb->subtn_info->prev_ver_sub_count, > > diff --git a/src/mds/mds_dt_tipc.c b/src/mds/mds_dt_tipc.c index > > a3abff5..656f79c 100644 > > --- a/src/mds/mds_dt_tipc.c > > +++ b/src/mds/mds_dt_tipc.c > > @@ -2856,6 +2856,7 @@ uint32_t > mdtm_frag_and_send(MDTM_SEND_REQ *req, > > uint32_t seq_num, > > uint16_t frag_val = 0; > > uint32_t sum_mds_hdr_plus_mdtm_hdr_plus_len; > > int version = req->msg_arch_word & 0x7; > > + uint32_t ret = NCSCC_RC_SUCCESS; > > > > if (version > 1) { > > sum_mds_hdr_plus_mdtm_hdr_plus_len = @@ -2952,11 > +2953,23 @@ > > > uint32_t mdtm_frag_and_send(MDTM_SEND_REQ *req, uint32_t seq_num, > > free(body); > > return NCSCC_RC_FAILURE; > > } > > - m_MDS_LOG_DBG( > > - "MDTM:Sending message with Service > > Seqno=%d, Fragment Seqnum=%d, frag_num=%d, TO > > Dest_Tipc_id=<0x%08x:%u>", > > - req->svc_seq_num, seq_num, frag_val, > > - id.node, id.ref); > > - mdtm_sendto(body, len_buf, id); > > + if (((req->snd_type == MDS_SENDTYPE_RBCAST) > > || > > +(req->snd_type == MDS_SENDTYPE_BCAST)) > > && > > + (version > 0) && (tipc_mcast_enabled)) { > > + m_MDS_LOG_DBG( > > + "MDTM:Send Multicast message > with > > Service Seqno=%d, Fragment Seqnum=%d, frag_num=%d " 
> > + "From svc_id = %s(%d) TO svc_id > = > > %s(%d)", > > + req->svc_seq_num, seq_num, > > frag_val, > > + get_svc_names(req->src_svc_id), > req- > > >src_svc_id, > > +
Re: [devel] [PATCH 1/1] mds: support multicast fragmented messages [#3033]
Hi bro.Vu, I think it won't cause memleak since I break after m_MMGR_REMOVE_FROM_START() free(body) Best Regards, ThuanTr -Original Message- From: Vu Minh Nguyen Sent: Wednesday, April 24, 2019 1:45 PM To: 'thuan.tran' ; hans.nordeb...@ericsson.com; 'Minh Hon Chau' Cc: opensaf-devel@lists.sourceforge.net Subject: RE: [PATCH 1/1] mds: support multicast fragmented messages [#3033] Hi Thuan, Tested broadcasting with large packages (each 1M). Ack with below comments. Regards, Vu > -Original Message- > From: thuan.tran > Sent: Wednesday, April 24, 2019 12:00 PM > To: hans.nordeb...@ericsson.com; Vu Minh Nguyen > ; Minh Hon Chau c...@users.sourceforge.net> > Cc: opensaf-devel@lists.sourceforge.net; thuan.tran > > Subject: [PATCH 1/1] mds: support multicast fragmented messages > [#3033] > > - Sender may send broadcast big messages (> 65K) then small messages > (< 65K). > Current MDS just loop via all destinations to unicast all fragmented messages > to one by one destinations. But sending multicast non-fragment > messages to all destinations. Therefor, receivers may get messages > with incorrect order, non-fragment messages may come before fragmented > messages. > For example, it may lead to OUT OF ORDER for IMMNDs during IMMD sync. > - Solution: support send multicast each fragmented messages to avoid > disorder of arrived broadcast messages. 
> --- > src/mds/mds_c_sndrcv.c | 3 +-- > src/mds/mds_dt_tipc.c | 55 > -- > 2 files changed, 45 insertions(+), 13 deletions(-) > > diff --git a/src/mds/mds_c_sndrcv.c b/src/mds/mds_c_sndrcv.c index > 703bc8e..7850ac7 100644 > --- a/src/mds/mds_c_sndrcv.c > +++ b/src/mds/mds_c_sndrcv.c > @@ -4496,8 +4496,7 @@ static uint32_t > mcm_pvt_process_svc_bcast_common( > info_result->key.vdest_id, req, 0, > info_result->key.adest, pri); > if ((svc_cb->subtn_info->prev_ver_sub_count == 0) && > - (tipc_mode_enabled) && (tipc_mcast_enabled) && > - (to_msg.bcast_buff_len < MDS_DIRECT_BUF_MAXSIZE)) { > + (tipc_mode_enabled) && (tipc_mcast_enabled)) { > m_MDS_LOG_DBG( > "MDTM: Break while(1) prev_ver_sub_count: %d svc_id =%s(%d) > to_msg.bcast_buff_len: %d ", > svc_cb->subtn_info->prev_ver_sub_count, > diff --git a/src/mds/mds_dt_tipc.c b/src/mds/mds_dt_tipc.c index > a3abff5..656f79c 100644 > --- a/src/mds/mds_dt_tipc.c > +++ b/src/mds/mds_dt_tipc.c > @@ -2856,6 +2856,7 @@ uint32_t mdtm_frag_and_send(MDTM_SEND_REQ *req, > uint32_t seq_num, > uint16_t frag_val = 0; > uint32_t sum_mds_hdr_plus_mdtm_hdr_plus_len; > int version = req->msg_arch_word & 0x7; > + uint32_t ret = NCSCC_RC_SUCCESS; > > if (version > 1) { > sum_mds_hdr_plus_mdtm_hdr_plus_len = @@ -2952,11 +2953,23 @@ > uint32_t mdtm_frag_and_send(MDTM_SEND_REQ *req, uint32_t seq_num, > free(body); > return NCSCC_RC_FAILURE; > } > - m_MDS_LOG_DBG( > - "MDTM:Sending message with Service > Seqno=%d, Fragment Seqnum=%d, frag_num=%d, TO > Dest_Tipc_id=<0x%08x:%u>", > - req->svc_seq_num, seq_num, frag_val, > - id.node, id.ref); > - mdtm_sendto(body, len_buf, id); > + if (((req->snd_type == MDS_SENDTYPE_RBCAST) > || > + (req->snd_type == MDS_SENDTYPE_BCAST)) > && > + (version > 0) && (tipc_mcast_enabled)) { > + m_MDS_LOG_DBG( > + "MDTM:Send Multicast message with > Service Seqno=%d, Fragment Seqnum=%d, frag_num=%d " > + "From svc_id = %s(%d) TO svc_id = > %s(%d)", > + req->svc_seq_num, seq_num, > frag_val, > + 
get_svc_names(req->src_svc_id), req- > >src_svc_id, > + get_svc_names(req->dest_svc_id), > req->dest_svc_id); > + ret = mdtm_mcast_sendto(body, > len_buf, req); > + } else { > + m_MDS_LOG_DBG( > + "MDTM:Sending message with Service > Seqno=%d, Fragment Seqnum=%d, frag_num=%d, TO > Dest_Tipc_id=<0x%08x:%u>", > + req->svc_seq_num, seq_num, > frag_val, > + id.node, id.ref); > + ret = mdtm_sendto(body, len_buf, id); > + }
Re: [devel] [PATCH 1/1] mds: support multicast fragmented messages [#3033]
Hi Thuan, Tested broadcasting with large packages (each 1M). Ack with below comments. Regards, Vu > -Original Message- > From: thuan.tran > Sent: Wednesday, April 24, 2019 12:00 PM > To: hans.nordeb...@ericsson.com; Vu Minh Nguyen > ; Minh Hon Chau c...@users.sourceforge.net> > Cc: opensaf-devel@lists.sourceforge.net; thuan.tran > > Subject: [PATCH 1/1] mds: support multicast fragmented messages [#3033] > > - Sender may send broadcast big messages (> 65K) then small messages (< > 65K). > Current MDS just loop via all destinations to unicast all fragmented messages > to one by one destinations. But sending multicast non-fragment messages to > all > destinations. Therefor, receivers may get messages with incorrect order, > non-fragment messages may come before fragmented messages. > For example, it may lead to OUT OF ORDER for IMMNDs during IMMD sync. > - Solution: support send multicast each fragmented messages to avoid > disorder of arrived broadcast messages. > --- > src/mds/mds_c_sndrcv.c | 3 +-- > src/mds/mds_dt_tipc.c | 55 > -- > 2 files changed, 45 insertions(+), 13 deletions(-) > > diff --git a/src/mds/mds_c_sndrcv.c b/src/mds/mds_c_sndrcv.c > index 703bc8e..7850ac7 100644 > --- a/src/mds/mds_c_sndrcv.c > +++ b/src/mds/mds_c_sndrcv.c > @@ -4496,8 +4496,7 @@ static uint32_t > mcm_pvt_process_svc_bcast_common( > info_result->key.vdest_id, req, 0, > info_result->key.adest, pri); > if ((svc_cb->subtn_info->prev_ver_sub_count == 0) && > - (tipc_mode_enabled) && (tipc_mcast_enabled) && > - (to_msg.bcast_buff_len < MDS_DIRECT_BUF_MAXSIZE)) { > + (tipc_mode_enabled) && (tipc_mcast_enabled)) { > m_MDS_LOG_DBG( > "MDTM: Break while(1) prev_ver_sub_count: %d > svc_id =%s(%d) to_msg.bcast_buff_len: %d ", > svc_cb->subtn_info->prev_ver_sub_count, > diff --git a/src/mds/mds_dt_tipc.c b/src/mds/mds_dt_tipc.c > index a3abff5..656f79c 100644 > --- a/src/mds/mds_dt_tipc.c > +++ b/src/mds/mds_dt_tipc.c > @@ -2856,6 +2856,7 @@ uint32_t mdtm_frag_and_send(MDTM_SEND_REQ > *req, 
uint32_t seq_num, > uint16_t frag_val = 0; > uint32_t sum_mds_hdr_plus_mdtm_hdr_plus_len; > int version = req->msg_arch_word & 0x7; > + uint32_t ret = NCSCC_RC_SUCCESS; > > if (version > 1) { > sum_mds_hdr_plus_mdtm_hdr_plus_len = > @@ -2952,11 +2953,23 @@ uint32_t > mdtm_frag_and_send(MDTM_SEND_REQ *req, uint32_t seq_num, > free(body); > return NCSCC_RC_FAILURE; > } > - m_MDS_LOG_DBG( > - "MDTM:Sending message with Service > Seqno=%d, Fragment Seqnum=%d, frag_num=%d, TO > Dest_Tipc_id=<0x%08x:%u>", > - req->svc_seq_num, seq_num, frag_val, > - id.node, id.ref); > - mdtm_sendto(body, len_buf, id); > + if (((req->snd_type == MDS_SENDTYPE_RBCAST) > || > + (req->snd_type == MDS_SENDTYPE_BCAST)) > && > + (version > 0) && (tipc_mcast_enabled)) { > + m_MDS_LOG_DBG( > + "MDTM:Send Multicast message with > Service Seqno=%d, Fragment Seqnum=%d, frag_num=%d " > + "From svc_id = %s(%d) TO svc_id = > %s(%d)", > + req->svc_seq_num, seq_num, > frag_val, > + get_svc_names(req->src_svc_id), req- > >src_svc_id, > + get_svc_names(req->dest_svc_id), > req->dest_svc_id); > + ret = mdtm_mcast_sendto(body, > len_buf, req); > + } else { > + m_MDS_LOG_DBG( > + "MDTM:Sending message with Service > Seqno=%d, Fragment Seqnum=%d, frag_num=%d, TO > Dest_Tipc_id=<0x%08x:%u>", > + req->svc_seq_num, seq_num, > frag_val, > + id.node, id.ref); > + ret = mdtm_sendto(body, len_buf, id); > + } > m_MMGR_REMOVE_FROM_START( > , > len_buf - > @@ -2965,6 +2978,9 @@ uint32_t mdtm_frag_and_send(MDTM_SEND_REQ > *req, uint32_t seq_num, > len = > len - (len_buf - > > sum_mds_hdr_plus_mdtm_hdr_plus_len); > + if (ret !=
[devel] [PATCH 1/1] mds: support multicast fragmented messages [#3033]
- Sender may send broadcast big messages (> 65K) then small messages (< 65K). Current MDS just loop via all destinations to unicast all fragmented messages to one by one destinations. But sending multicast non-fragment messages to all destinations. Therefore, receivers may get messages with incorrect order, non-fragment messages may come before fragmented messages. For example, it may lead to OUT OF ORDER for IMMNDs during IMMD sync. - Solution: support send multicast each fragmented messages to avoid disorder of arrived broadcast messages. --- src/mds/mds_c_sndrcv.c | 3 +-- src/mds/mds_dt_tipc.c | 55 -- 2 files changed, 45 insertions(+), 13 deletions(-) diff --git a/src/mds/mds_c_sndrcv.c b/src/mds/mds_c_sndrcv.c index 703bc8e..7850ac7 100644 --- a/src/mds/mds_c_sndrcv.c +++ b/src/mds/mds_c_sndrcv.c @@ -4496,8 +4496,7 @@ static uint32_t mcm_pvt_process_svc_bcast_common( info_result->key.vdest_id, req, 0, info_result->key.adest, pri); if ((svc_cb->subtn_info->prev_ver_sub_count == 0) && - (tipc_mode_enabled) && (tipc_mcast_enabled) && - (to_msg.bcast_buff_len < MDS_DIRECT_BUF_MAXSIZE)) { + (tipc_mode_enabled) && (tipc_mcast_enabled)) { m_MDS_LOG_DBG( "MDTM: Break while(1) prev_ver_sub_count: %d svc_id =%s(%d) to_msg.bcast_buff_len: %d ", svc_cb->subtn_info->prev_ver_sub_count, diff --git a/src/mds/mds_dt_tipc.c b/src/mds/mds_dt_tipc.c index a3abff5..656f79c 100644 --- a/src/mds/mds_dt_tipc.c +++ b/src/mds/mds_dt_tipc.c @@ -2856,6 +2856,7 @@ uint32_t mdtm_frag_and_send(MDTM_SEND_REQ *req, uint32_t seq_num, uint16_t frag_val = 0; uint32_t sum_mds_hdr_plus_mdtm_hdr_plus_len; int version = req->msg_arch_word & 0x7; + uint32_t ret = NCSCC_RC_SUCCESS; if (version > 1) { sum_mds_hdr_plus_mdtm_hdr_plus_len = @@ -2952,11 +2953,23 @@ uint32_t mdtm_frag_and_send(MDTM_SEND_REQ *req, uint32_t seq_num, free(body); return NCSCC_RC_FAILURE; } - m_MDS_LOG_DBG( - "MDTM:Sending message with Service Seqno=%d, Fragment Seqnum=%d, frag_num=%d, TO Dest_Tipc_id=<0x%08x:%u>", - 
req->svc_seq_num, seq_num, frag_val, - id.node, id.ref); - mdtm_sendto(body, len_buf, id); + if (((req->snd_type == MDS_SENDTYPE_RBCAST) || +(req->snd_type == MDS_SENDTYPE_BCAST)) && + (version > 0) && (tipc_mcast_enabled)) { + m_MDS_LOG_DBG( + "MDTM:Send Multicast message with Service Seqno=%d, Fragment Seqnum=%d, frag_num=%d " + "From svc_id = %s(%d) TO svc_id = %s(%d)", + req->svc_seq_num, seq_num, frag_val, + get_svc_names(req->src_svc_id), req->src_svc_id, + get_svc_names(req->dest_svc_id), req->dest_svc_id); + ret = mdtm_mcast_sendto(body, len_buf, req); + } else { + m_MDS_LOG_DBG( + "MDTM:Sending message with Service Seqno=%d, Fragment Seqnum=%d, frag_num=%d, TO Dest_Tipc_id=<0x%08x:%u>", + req->svc_seq_num, seq_num, frag_val, + id.node, id.ref); + ret = mdtm_sendto(body, len_buf, id); + } m_MMGR_REMOVE_FROM_START( , len_buf - @@ -2965,6 +2978,9 @@ uint32_t mdtm_frag_and_send(MDTM_SEND_REQ *req, uint32_t seq_num, len = len - (len_buf - sum_mds_hdr_plus_mdtm_hdr_plus_len); + if (ret != NCSCC_RC_SUCCESS) + // Failed to send a fragmented msg, stop sending + break; } else { p8 = (uint8_t *)m_MMGR_DATA_AT_START( usrbuf, len_buf - MDTM_FRAG_HDR_PLUS_LEN_2, @@ -2984,11 +3000,23 @@ uint32_t mdtm_frag_and_send(MDTM_SEND_REQ *req, uint32_t seq_num,