Ack with a minor comment: please zero the contents of the allocated memory (memset to zero), or use calloc instead of malloc.
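For example (a sketch only, applied to one allocation site in the patch; the NULL check and its log message are additions for illustration, not part of the submitted patch):

    /* calloc() both allocates and zero-fills, so no separate memset is
     * needed; checking the result avoids writing through a NULL pointer.
     * All identifiers come from the surrounding patch context. */
    uint8_t *body = calloc(1, len + SUM_MDS_HDR_PLUS_MDTM_HDR_PLUS_LEN);
    if (body == NULL) {
            m_MDS_LOG_ERR("MDTM: calloc failed for the send msg\n");
            m_MMGR_FREE_BUFR_LIST(usrbuf);
            return NCSCC_RC_FAILURE;
    }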
Thanks, Ramesh.

On 1/30/2015 3:02 PM, mahesh.va...@oracle.com wrote:
>  osaf/libs/core/mds/mds_dt_tipc.c  | 39 ++++++++++++++++++++++-----------
>  osaf/libs/core/mds/mds_dt_trans.c | 45 ++++++++++++++++++++++++++-------------
>  2 files changed, 56 insertions(+), 28 deletions(-)
>
>
> Bug Description:
> ----------------
> To improve performance and reduce transport traffic, the maximum MDS send
> size for a single message was increased from 18000 to 65536 bytes as part
> of enhancement ticket #654 (6.5 release).
>
> If an MDS application task is created with a small stack (say 32000 bytes)
> and tries to send a huge message (above 65536 bytes) using the
> MDS_SENDTYPE_RSP send type, the large message buffer is allocated on the
> stack, so the application runs out of stack space and segfaults.
>
> Sending huge data with the MDS_SENDTYPE_RSP send type is so far a unique
> use case: applications have used SENDTYPE_RSP only to send a short
> response to a message received through SENDTYPE_SNDRSP. To avoid timeouts
> between send and response, applications have used the SENDTYPE_SND,
> SENDTYPE_SNDRSP, SENDTYPE_SNDRAC and SENDTYPE_SNDACK send types for huge
> messages, which is why the issue was not reproducible with normal send
> messages.
>
> Bug Fix:
> --------
> This ticket converts the send message buffer from on-stack allocation to
> dynamic memory allocation to avoid exhausting the stack. It addresses only
> the full-encode send path of a message (the flow in the bug report), which
> is the generic use case and needs to be fixed immediately.
>
> Please note that this patch fixes only the full-encode send flow of
> applications. There is another ticket, #1246, which will identify further
> MDS transport send flows (outside the current issue), fix them in a
> coherent manner, and go through a complete test cycle of all send flows.
> diff --git a/osaf/libs/core/mds/mds_dt_tipc.c b/osaf/libs/core/mds/mds_dt_tipc.c
> --- a/osaf/libs/core/mds/mds_dt_tipc.c
> +++ b/osaf/libs/core/mds/mds_dt_tipc.c
> @@ -2131,17 +2131,19 @@ uint32_t mds_mdtm_send_tipc(MDTM_SEND_RE
>              return mdtm_frag_and_send(req, frag_seq_num, tipc_id, frag_size);
>          } else {
>              uint8_t *p8;
> -            uint8_t body[len + SUM_MDS_HDR_PLUS_MDTM_HDR_PLUS_LEN];
> +            uint8_t *body = NULL;
> +            body = malloc(len + SUM_MDS_HDR_PLUS_MDTM_HDR_PLUS_LEN);
>
>              p8 = (uint8_t *)m_MMGR_DATA_AT_START(usrbuf, len, (char *)
> -                    &body[SUM_MDS_HDR_PLUS_MDTM_HDR_PLUS_LEN]);
> -
> -            if (p8 != &body[SUM_MDS_HDR_PLUS_MDTM_HDR_PLUS_LEN])
> -                memcpy(&body[SUM_MDS_HDR_PLUS_MDTM_HDR_PLUS_LEN], p8, len);
> +                    (body + SUM_MDS_HDR_PLUS_MDTM_HDR_PLUS_LEN));
> +
> +            if (p8 != (body + SUM_MDS_HDR_PLUS_MDTM_HDR_PLUS_LEN))
> +                memcpy((body + SUM_MDS_HDR_PLUS_MDTM_HDR_PLUS_LEN), p8, len);
>
>              if (NCSCC_RC_SUCCESS != mdtm_add_mds_hdr(body, req)) {
>                  m_MDS_LOG_ERR("MDTM: Unable to add the mds Hdr to the send msg\n");
>                  m_MMGR_FREE_BUFR_LIST(usrbuf);
> +                free(body);
>                  return NCSCC_RC_FAILURE;
>              }
>
> @@ -2150,6 +2152,7 @@ uint32_t mds_mdtm_send_tipc(MDTM_SEND_RE
>                  frag_seq_num, 0)) {
>                  m_MDS_LOG_ERR("MDTM: Unable to add the frag Hdr to the send msg\n");
>                  m_MMGR_FREE_BUFR_LIST(usrbuf);
> +                free(body);
>                  return NCSCC_RC_FAILURE;
>              }
>
> @@ -2164,6 +2167,7 @@ uint32_t mds_mdtm_send_tipc(MDTM_SEND_RE
>                          ncsmds_svc_names[req->src_svc_id], ncsmds_svc_names[req->dest_svc_id]);
>                  if ( len > MDS_DIRECT_BUF_MAXSIZE) {
>                      m_MMGR_FREE_BUFR_LIST(usrbuf);
> +                    free(body);
>                      LOG_NO("MDTM: Not possible to send size:%d TIPC multicast to svc_id = %s",
>                          len, ncsmds_svc_names[req->dest_svc_id]);
>                      return NCSCC_RC_FAILURE;
> @@ -2174,16 +2178,19 @@ uint32_t mds_mdtm_send_tipc(MDTM_SEND_RE
>                          len, ncsmds_svc_names[req->src_svc_id],
>                          ncsmds_svc_names[req->dest_svc_id], strerror(errno));
>                      m_MMGR_FREE_BUFR_LIST(usrbuf);
> +                    free(body);
>                      return NCSCC_RC_FAILURE;
>                  }
>              } else {
>                  if (NCSCC_RC_SUCCESS != mdtm_sendto(body, len, tipc_id)) {
>                      m_MDS_LOG_ERR("MDTM: Unable to send the msg thru TIPC\n");
>                      m_MMGR_FREE_BUFR_LIST(usrbuf);
> +                    free(body);
>                      return NCSCC_RC_FAILURE;
>                  }
>              }
>              m_MMGR_FREE_BUFR_LIST(usrbuf);
> +            free(body);
>              return NCSCC_RC_SUCCESS;
>          }
>      }
> @@ -2322,17 +2329,19 @@ uint32_t mdtm_frag_and_send(MDTM_SEND_RE
>              frag_val = NO_FRAG_BIT | i;
>          }
>          {
> -            uint8_t body[len_buf];
> +            uint8_t *body = NULL;
> +            body = malloc(len_buf);
>              if (i == 1) {
>                  p8 = (uint8_t *)m_MMGR_DATA_AT_START(usrbuf,
>                      (len_buf - SUM_MDS_HDR_PLUS_MDTM_HDR_PLUS_LEN),
> -                    (char *)&body[SUM_MDS_HDR_PLUS_MDTM_HDR_PLUS_LEN]);
> -
> -                if (p8 != &body[SUM_MDS_HDR_PLUS_MDTM_HDR_PLUS_LEN])
> -                    memcpy(&body[SUM_MDS_HDR_PLUS_MDTM_HDR_PLUS_LEN], p8, (len_buf - SUM_MDS_HDR_PLUS_MDTM_HDR_PLUS_LEN));
> +                    (char *)(body + SUM_MDS_HDR_PLUS_MDTM_HDR_PLUS_LEN));
> +
> +                if (p8 != (body + SUM_MDS_HDR_PLUS_MDTM_HDR_PLUS_LEN))
> +                    memcpy((body + SUM_MDS_HDR_PLUS_MDTM_HDR_PLUS_LEN), p8, (len_buf - SUM_MDS_HDR_PLUS_MDTM_HDR_PLUS_LEN));
>
>                  if (NCSCC_RC_SUCCESS != mdtm_add_mds_hdr(body, req)) {
>                      m_MDS_LOG_ERR("MDTM: frg MDS hdr addition failed\n");
> +                    free(body);
>                      m_MMGR_FREE_BUFR_LIST(usrbuf);
>                      return NCSCC_RC_FAILURE;
>                  }
> @@ -2340,6 +2349,7 @@ uint32_t mdtm_frag_and_send(MDTM_SEND_RE
>                  if (NCSCC_RC_SUCCESS != mdtm_add_frag_hdr(body, len_buf, seq_num, frag_val)) {
>                      m_MDS_LOG_ERR("MDTM: Frag hdr addition failed\n");
>                      m_MMGR_FREE_BUFR_LIST(usrbuf);
> +                    free(body);
>                      return NCSCC_RC_FAILURE;
>                  }
>                  m_MDS_LOG_DBG
> @@ -2347,16 +2357,18 @@ uint32_t mdtm_frag_and_send(MDTM_SEND_RE
>                      req->svc_seq_num, seq_num, frag_val, id.node, id.ref);
>                  mdtm_sendto(body, len_buf, id);
>                  m_MMGR_REMOVE_FROM_START(&usrbuf, len_buf - SUM_MDS_HDR_PLUS_MDTM_HDR_PLUS_LEN);
> +                free(body);
>                  len = len - (len_buf - SUM_MDS_HDR_PLUS_MDTM_HDR_PLUS_LEN);
>              } else {
>                  p8 = (uint8_t *)m_MMGR_DATA_AT_START(usrbuf, len_buf - MDTM_FRAG_HDR_PLUS_LEN_2,
> -                    (char *)&body[MDTM_FRAG_HDR_PLUS_LEN_2]);
> -                if (p8 != &body[MDTM_FRAG_HDR_PLUS_LEN_2])
> -                    memcpy(&body[MDTM_FRAG_HDR_PLUS_LEN_2], p8, len_buf - MDTM_FRAG_HDR_PLUS_LEN_2);
> +                    (char *)(body + MDTM_FRAG_HDR_PLUS_LEN_2));
> +                if (p8 != (body + MDTM_FRAG_HDR_PLUS_LEN_2))
> +                    memcpy((body + MDTM_FRAG_HDR_PLUS_LEN_2), p8, len_buf - MDTM_FRAG_HDR_PLUS_LEN_2);
>
>                  if (NCSCC_RC_SUCCESS != mdtm_add_frag_hdr(body, len_buf, seq_num, frag_val)) {
>                      m_MDS_LOG_ERR("MDTM: Frag hde addition failed\n");
>                      m_MMGR_FREE_BUFR_LIST(usrbuf);
> +                    free(body);
>                      return NCSCC_RC_FAILURE;
>                  }
>                  m_MDS_LOG_DBG
> @@ -2364,6 +2376,7 @@ uint32_t mdtm_frag_and_send(MDTM_SEND_RE
>                      req->svc_seq_num, seq_num, frag_val, id.node, id.ref);
>                  mdtm_sendto(body, len_buf, id);
>                  m_MMGR_REMOVE_FROM_START(&usrbuf, (len_buf - MDTM_FRAG_HDR_PLUS_LEN_2));
> +                free(body);
>                  len = len - (len_buf - MDTM_FRAG_HDR_PLUS_LEN_2);
>                  if (len == 0)
>                      break;
> diff --git a/osaf/libs/core/mds/mds_dt_trans.c b/osaf/libs/core/mds/mds_dt_trans.c
> --- a/osaf/libs/core/mds/mds_dt_trans.c
> +++ b/osaf/libs/core/mds/mds_dt_trans.c
> @@ -269,25 +269,28 @@ static uint32_t mdtm_frag_and_send_tcp(M
>              frag_val = NO_FRAG_BIT | i;
>          }
>          {
> -            uint8_t body[len_buf];
> +            uint8_t *body = NULL;
> +            body = malloc(len_buf);
>              if (i == 1) {
>                  p8 = (uint8_t *)m_MMGR_DATA_AT_START(usrbuf,
>                      (len_buf - SUM_MDS_HDR_PLUS_MDTM_HDR_PLUS_LEN_TCP),
>                      (char *)
> -                    &body[SUM_MDS_HDR_PLUS_MDTM_HDR_PLUS_LEN_TCP]);
> +                    (body + SUM_MDS_HDR_PLUS_MDTM_HDR_PLUS_LEN_TCP));
>
> -                if (p8 != &body[SUM_MDS_HDR_PLUS_MDTM_HDR_PLUS_LEN_TCP])
> -                    memcpy(&body[SUM_MDS_HDR_PLUS_MDTM_HDR_PLUS_LEN_TCP], p8, (len_buf - SUM_MDS_HDR_PLUS_MDTM_HDR_PLUS_LEN_TCP));
> +                if (p8 != (body + SUM_MDS_HDR_PLUS_MDTM_HDR_PLUS_LEN_TCP))
> +                    memcpy((body + SUM_MDS_HDR_PLUS_MDTM_HDR_PLUS_LEN_TCP), p8, (len_buf - SUM_MDS_HDR_PLUS_MDTM_HDR_PLUS_LEN_TCP));
>
>                  if (NCSCC_RC_SUCCESS != mdtm_add_mds_hdr_tcp(body, req, len_buf)) {
>                      m_MDS_LOG_ERR("MDTM: frg MDS hdr addition failed\n");
>                      m_MMGR_FREE_BUFR_LIST(usrbuf);
> +                    free(body);
>                      return NCSCC_RC_FAILURE;
>                  }
>
> -                if (NCSCC_RC_SUCCESS != mdtm_add_frag_hdr_tcp(&body[24], len_buf, seq_num, frag_val)) {
> +                if (NCSCC_RC_SUCCESS != mdtm_add_frag_hdr_tcp((body + 24), len_buf, seq_num, frag_val)) {
>                      m_MDS_LOG_ERR("MDTM: Frag hdr addition failed\n");
>                      m_MMGR_FREE_BUFR_LIST(usrbuf);
> +                    free(body);
>                      return NCSCC_RC_FAILURE;
>                  }
>                  m_MDS_LOG_DBG
> @@ -295,27 +298,32 @@ static uint32_t mdtm_frag_and_send_tcp(M
>                      req->svc_seq_num, seq_num, frag_val, id.node_id, id.process_id);
>
>                  if (NCSCC_RC_SUCCESS != mds_sock_send(body, len_buf)) {
> +                    m_MMGR_FREE_BUFR_LIST(usrbuf);
> +                    free(body);
>                      return NCSCC_RC_FAILURE;
>                  }
>
>                  m_MMGR_REMOVE_FROM_START(&usrbuf, len_buf - SUM_MDS_HDR_PLUS_MDTM_HDR_PLUS_LEN_TCP);
> +                free(body);
>                  len = len - (len_buf - SUM_MDS_HDR_PLUS_MDTM_HDR_PLUS_LEN_TCP);
>              } else {
>                  p8 = (uint8_t *)m_MMGR_DATA_AT_START(usrbuf, len_buf - MDTM_FRAG_HDR_PLUS_LEN_2_TCP,
> -                    (char *)&body[MDTM_FRAG_HDR_PLUS_LEN_2_TCP]);
> -                if (p8 != &body[MDTM_FRAG_HDR_PLUS_LEN_2_TCP])
> -                    memcpy(&body[MDTM_FRAG_HDR_PLUS_LEN_2_TCP], p8,
> +                    (char *)(body + MDTM_FRAG_HDR_PLUS_LEN_2_TCP));
> +                if (p8 != (body + MDTM_FRAG_HDR_PLUS_LEN_2_TCP))
> +                    memcpy((body + MDTM_FRAG_HDR_PLUS_LEN_2_TCP), p8,
>                      len_buf - MDTM_FRAG_HDR_PLUS_LEN_2_TCP);
>
>                  if (NCSCC_RC_SUCCESS != mdtm_fill_frag_hdr_tcp(body, req, len_buf)) {
>                      m_MDS_LOG_ERR("MDTM: Frag hdr addition failed\n");
>                      m_MMGR_FREE_BUFR_LIST(usrbuf);
> +                    free(body);
>                      return NCSCC_RC_FAILURE;
>                  }
>
> -                if (NCSCC_RC_SUCCESS != mdtm_add_frag_hdr_tcp(&body[24], len_buf, seq_num, frag_val)) {
> +                if (NCSCC_RC_SUCCESS != mdtm_add_frag_hdr_tcp((body + 24), len_buf, seq_num, frag_val)) {
>                      m_MDS_LOG_ERR("MDTM: Frag hde addition failed\n");
>                      m_MMGR_FREE_BUFR_LIST(usrbuf);
> +                    free(body);
>                      return NCSCC_RC_FAILURE;
>                  }
>                  m_MDS_LOG_DBG
> @@ -323,10 +331,13 @@ static uint32_t mdtm_frag_and_send_tcp(M
>                      req->svc_seq_num, seq_num, frag_val, id.node_id, id.process_id);
>
>                  if (NCSCC_RC_SUCCESS != mds_sock_send(body, len_buf)) {
> +                    m_MMGR_FREE_BUFR_LIST(usrbuf);
> +                    free(body);
>                      return NCSCC_RC_FAILURE;
>                  }
>
>                  m_MMGR_REMOVE_FROM_START(&usrbuf, (len_buf - MDTM_FRAG_HDR_PLUS_LEN_2_TCP));
> +                free(body);
>                  len = len - (len_buf - MDTM_FRAG_HDR_PLUS_LEN_2_TCP);
>                  if (len == 0)
>                      break;
> @@ -450,29 +461,31 @@ uint32_t mds_mdtm_send_tcp(MDTM_SEND_REQ
>
>          } else {
>              uint8_t *p8;
> -            uint8_t body[len + SUM_MDS_HDR_PLUS_MDTM_HDR_PLUS_LEN_TCP];
> +            uint8_t *body = NULL;
> +            body = malloc(len + SUM_MDS_HDR_PLUS_MDTM_HDR_PLUS_LEN_TCP);
>
>              p8 = (uint8_t *)m_MMGR_DATA_AT_START(usrbuf, len, (char *)
> -                    &body
> -                    [SUM_MDS_HDR_PLUS_MDTM_HDR_PLUS_LEN_TCP]);
> +                    (body + SUM_MDS_HDR_PLUS_MDTM_HDR_PLUS_LEN_TCP));
>
> -            if (p8 != &body[SUM_MDS_HDR_PLUS_MDTM_HDR_PLUS_LEN_TCP])
> -                memcpy(&body[SUM_MDS_HDR_PLUS_MDTM_HDR_PLUS_LEN_TCP], p8, len);
> +            if (p8 != (body + SUM_MDS_HDR_PLUS_MDTM_HDR_PLUS_LEN_TCP))
> +                memcpy((body + SUM_MDS_HDR_PLUS_MDTM_HDR_PLUS_LEN_TCP), p8, len);
>
>              if (NCSCC_RC_SUCCESS != mdtm_add_mds_hdr_tcp(body, req,
>                      len + SUM_MDS_HDR_PLUS_MDTM_HDR_PLUS_LEN_TCP)) {
>                  m_MDS_LOG_ERR("MDTM: Unable to add the mds Hdr to the send msg\n");
>                  m_MMGR_FREE_BUFR_LIST(usrbuf);
> +                free(body);
>                  return NCSCC_RC_FAILURE;
>              }
>
>              if (NCSCC_RC_SUCCESS !=
> -                mdtm_add_frag_hdr_tcp(&body[24],
> +                mdtm_add_frag_hdr_tcp((body + 24),
>                      (len + SUM_MDS_HDR_PLUS_MDTM_HDR_PLUS_LEN_TCP),
>                      frag_seq_num, 0)) {
>                  m_MDS_LOG_ERR("MDTM: Unable to add the frag Hdr to the send msg\n");
>                  m_MMGR_FREE_BUFR_LIST(usrbuf);
> +                free(body);
>                  return NCSCC_RC_FAILURE;
>              }
>
> @@ -483,9 +496,11 @@ uint32_t mds_mdtm_send_tcp(MDTM_SEND_REQ
>              if (NCSCC_RC_SUCCESS != mds_sock_send(body, (len + SUM_MDS_HDR_PLUS_MDTM_HDR_PLUS_LEN_TCP))) {
>                  m_MDS_LOG_ERR("MDTM: Unable to send the msg thru TIPC\n");
>                  m_MMGR_FREE_BUFR_LIST(usrbuf);
> +                free(body);
>                  return NCSCC_RC_FAILURE;
>              }
>              m_MMGR_FREE_BUFR_LIST(usrbuf);
> +            free(body);
>              return NCSCC_RC_SUCCESS;
>          }
>      }
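As context for the failure mode described in the bug report, the following
self-contained sketch (illustrative only, not OpenSAF code; the 32768-byte
stack and 65536-byte message mirror the sizes in the report) shows why a
large on-stack buffer crashes a thread created with a small stack, while a
heap allocation of the same size does not:

    /* Standalone illustration of the reported failure mode.
     * Build with: gcc -pthread stack_demo.c -o stack_demo */
    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define MSG_LEN 65536   /* the post-#654 maximum MDS message size */

    static void *send_with_stack_buffer(void *arg)
    {
            (void)arg;
            uint8_t body[MSG_LEN];          /* like the old VLA 'body[len + ...]' */
            memset(body, 0, sizeof(body));  /* touching it overruns a 32 KB stack */
            return NULL;
    }

    static void *send_with_heap_buffer(void *arg)
    {
            (void)arg;
            uint8_t *body = malloc(MSG_LEN); /* heap: thread stack size is irrelevant */
            if (body != NULL) {
                    memset(body, 0, MSG_LEN);
                    free(body);
            }
            return NULL;
    }

    int main(void)
    {
            pthread_t tid;
            pthread_attr_t attr;

            pthread_attr_init(&attr);
            /* A deliberately small stack, as in the bug report; the requested
             * size must be at least PTHREAD_STACK_MIN on the platform. */
            if (pthread_attr_setstacksize(&attr, 32768) != 0)
                    fprintf(stderr, "32 KB below PTHREAD_STACK_MIN; demo inconclusive\n");

            pthread_create(&tid, &attr, send_with_heap_buffer, NULL);
            pthread_join(tid, NULL);
            puts("heap buffer: OK");

            pthread_create(&tid, &attr, send_with_stack_buffer, NULL);
            pthread_join(tid, NULL);   /* typically never reached: SIGSEGV */
            puts("stack buffer: OK (unexpected)");

            pthread_attr_destroy(&attr);
            return 0;
    }

The patch applies the second pattern: the buffer moves to the heap, with a
matching free(body) on every return path.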