Fix data races reported by helgrind in mds/apitest
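
Helgrind flags races in the mds apitest programs: the receiver threads
and the main test thread touch the shared test globals (gl_tet_adest,
gl_tet_vdest, gl_tet_svc, gl_rcvdmsginfo, gl_direct_rcvmsginfo) without
synchronization, the wrapper functions share the ada_info / vda_info /
svc_to_mds_info request structures across threads, and several threads
write to stdout concurrently.

Address this by:
- adding a process-wide pthread_rwlock_t gl_lock and taking it around
  every access to the shared test globals,
- making svc_to_mds_info thread local and turning ada_info / vda_info
  (and gl_event_data) into local variables inside the wrappers,
- serializing stdout access through safe_printf()/safe_fflush(),
- using a local task handle in tet_svc_install_tp_10() and dropping the
  test_validate() call from the vdest uninstall helper thread so results
  are only validated from the main test thread.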
---
src/mds/apitest/mdstest.c | 7 +-
src/mds/apitest/mdstipc.h | 7 +-
src/mds/apitest/mdstipc_api.c | 196 +++++++++++++++++++++++++++++++++--------
src/mds/apitest/mdstipc_conf.c | 89 +++++++++++++------
4 files changed, 234 insertions(+), 65 deletions(-)
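
Note for reviewers: the receiver threads below all follow the same
locking convention. A minimal sketch of the pattern (assuming the
pthread_rwlock_t gl_lock added in mdstest.c and the existing
gl_rcvdmsginfo global; not a complete function):

	/* Snapshot the shared flag under the read lock. */
	bool rsp_reqd;
	pthread_rwlock_rdlock(&gl_lock);
	rsp_reqd = gl_rcvdmsginfo.rsp_reqd;
	pthread_rwlock_unlock(&gl_lock);

	if (rsp_reqd) {
		/* Send the response outside the lock, then clear the
		 * flag under the write lock. */
		pthread_rwlock_wrlock(&gl_lock);
		gl_rcvdmsginfo.rsp_reqd = 0;
		pthread_rwlock_unlock(&gl_lock);
	}

The receive callbacks (tet_mds_cb_rcv / tet_mds_cb_direct_rcv) fill in
gl_rcvdmsginfo / gl_direct_rcvmsginfo under the write lock, so the flag
and the message fields are published together.
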
diff --git a/src/mds/apitest/mdstest.c b/src/mds/apitest/mdstest.c
index bf6e173..3280e5b 100644
--- a/src/mds/apitest/mdstest.c
+++ b/src/mds/apitest/mdstest.c
@@ -35,6 +35,7 @@
//#include "mdstest.h"
SaAisErrorT rc;
+pthread_rwlock_t gl_lock;
int mds_startup(void)
{
@@ -83,13 +84,17 @@ int main(int argc, char **argv)
if (suite == 999) {
return 0;
}
-
if (mds_startup() != 0) {
printf("Fail to start mds agents\n");
return 1;
}
+ pthread_rwlock_init(&gl_lock, NULL);
+
int rc = test_run(suite, tcase);
+
+ pthread_rwlock_destroy(&gl_lock);
+
mds_shutdown();
return rc;
}
diff --git a/src/mds/apitest/mdstipc.h b/src/mds/apitest/mdstipc.h
index fbb6468..9e93a17 100644
--- a/src/mds/apitest/mdstipc.h
+++ b/src/mds/apitest/mdstipc.h
@@ -145,13 +145,12 @@ typedef struct tet_mds_recvd_msg_info {
} TET_MDS_RECVD_MSG_INFO;
/********************* GLOBAL variables ********************/
+extern _Thread_local NCSMDS_INFO svc_to_mds_info;
+extern pthread_rwlock_t gl_lock;
+
TET_ADEST gl_tet_adest;
TET_VDEST
gl_tet_vdest[4]; /*change it to 6 to run VDS Redundancy: 101 for Stress*/
-NCSADA_INFO ada_info;
-NCSVDA_INFO vda_info;
-NCSMDS_INFO svc_to_mds_info;
-TET_EVENT_INFO gl_event_data;
TET_SVC gl_tet_svc;
TET_MDS_RECVD_MSG_INFO gl_rcvdmsginfo, gl_direct_rcvmsginfo;
int gl_vdest_indx;
diff --git a/src/mds/apitest/mdstipc_api.c b/src/mds/apitest/mdstipc_api.c
index 5eb8bd9..3a98ecd 100644
--- a/src/mds/apitest/mdstipc_api.c
+++ b/src/mds/apitest/mdstipc_api.c
@@ -33,6 +33,28 @@ static MDS_CLIENT_MSG_FORMAT_VER gl_set_msg_fmt_ver;
MDS_SVC_ID svc_ids[3] = {2006, 2007, 2008};
+pthread_mutex_t safe_printf_mutex = PTHREAD_MUTEX_INITIALIZER;
+_Thread_local NCSMDS_INFO svc_to_mds_info;
+
+void safe_printf(const char* format, ... ) {
+ pthread_mutex_lock(&safe_printf_mutex);
+ va_list args;
+ va_start(args, format);
+ vfprintf(stdout, format, args);
+ va_end(args);
+ pthread_mutex_unlock(&safe_printf_mutex);
+}
+int safe_fflush(FILE *stream) {
+ int rc = 0;
+ pthread_mutex_lock(&safe_printf_mutex);
+ rc = fflush(stream);
+ pthread_mutex_unlock(&safe_printf_mutex);
+ return rc;
+}
+
+#define printf safe_printf
+#define fflush safe_fflush
+
/*****************************************************************************/
/************ SERVICE API TEST CASES ********************************/
/*****************************************************************************/
@@ -363,6 +385,7 @@ void tet_svc_install_tp_10()
{
int FAIL = 0;
SaUint32T rc;
+ NCSCONTEXT t_handle = 0;
// Creating a MxN VDEST with id = 2000
rc = create_vdest(NCS_VDEST_TYPE_MxN, 2000);
if (rc != NCSCC_RC_SUCCESS) {
@@ -373,25 +396,25 @@ void tet_svc_install_tp_10()
printf(
"\nTest case 10:Installing the External MIN service EXTMIN in a
seperate thread and Uninstalling it here\n");
// Install thread
- rc = tet_create_task((NCS_OS_CB)tet_vdest_install_thread,
- gl_tet_vdest[0].svc[0].task.t_handle);
+ rc = tet_create_task((NCS_OS_CB)tet_vdest_install_thread, t_handle);
if (rc != NCSCC_RC_SUCCESS) {
printf("\nFail to Install thread\n");
FAIL = 1;
}
-
// Now Release the Install Thread
- rc = tet_release_task(gl_tet_vdest[0].svc[0].task.t_handle);
+ rc = tet_release_task(t_handle);
if (rc != NCSCC_RC_SUCCESS) {
printf("\nFail to release thread\n");
FAIL = 1;
}
// Counter shall be != 0
+ pthread_rwlock_rdlock(&gl_lock);
if (gl_tet_vdest[0].svc_count == 0) {
printf("\nsvc_count == 0\n");
FAIL = 1;
};
+ pthread_rwlock_unlock(&gl_lock);
// Uninstalling the above service
rc = mds_service_uninstall(gl_tet_vdest[0].mds_pwe1_hdl,
@@ -809,8 +832,7 @@ void tet_vdest_uninstall_thread()
{
// Inside Thread
printf("tet_vdest_uninstall_thread\n");
- test_validate(mds_service_uninstall(gl_tet_vdest[0].mds_pwe1_hdl, 500),
- NCSCC_RC_SUCCESS);
+ mds_service_uninstall(gl_tet_vdest[0].mds_pwe1_hdl, 500);
}
void tet_svc_unstall_tp_1()
@@ -989,11 +1011,13 @@ void tet_svc_unstall_tp_5()
}
// Test gl_tet_vdest[0].svc_count == 0
+ pthread_rwlock_rdlock(&gl_lock);
if (gl_tet_vdest[0].svc_count != 0) {
printf("\nsvc_count is %d, should be 0\n",
gl_tet_vdest[0].svc_count);
FAIL = 1;
}
+ pthread_rwlock_unlock(&gl_lock);
// Destroying a MxN VDEST with id = 1001
rc = destroy_vdest(1001);
@@ -2425,11 +2449,13 @@ void tet_svc_subscr_ADEST_8()
NCSCC_RC_SUCCESS) {
printf("\nTASK is released\n");
}
+ pthread_rwlock_rdlock(&gl_lock);
if (gl_tet_adest.svc[0].subscr_count) {
printf("Cancel Fail\n");
FAIL = 1;
} else
printf("\nSuccess\n");
+ pthread_rwlock_unlock(&gl_lock);
}
// clean up
@@ -4636,7 +4662,7 @@ void tet_query_pwe_tp_3()
void tet_adest_rcvr_thread()
{
MDS_SVC_ID svc_id;
- int FAIL = 0;
+ bool rsp_reqd = false;
char tmp[] = " Hi Sender! My Name is RECEIVER ";
TET_MDS_MSG *mesg;
mesg = (TET_MDS_MSG *)malloc(sizeof(TET_MDS_MSG));
@@ -4653,29 +4679,32 @@ void tet_adest_rcvr_thread()
NCSMDS_SVC_ID_EXTERNAL_MIN,
SA_DISPATCH_ALL) != NCSCC_RC_SUCCESS) {
printf("Fail mds_service_retrieve\n");
- FAIL = 1;
}
/*after that send a response to the sender, if it expects*/
- if (gl_rcvdmsginfo.rsp_reqd) {
+ pthread_rwlock_rdlock(&gl_lock);
+ rsp_reqd = gl_rcvdmsginfo.rsp_reqd;
+ pthread_rwlock_unlock(&gl_lock);
+ if (rsp_reqd) {
if (mds_send_response(gl_tet_adest.pwe[0].mds_pwe_hdl,
NCSMDS_SVC_ID_EXTERNAL_MIN,
mesg) != NCSCC_RC_SUCCESS) {
printf("Response Fail\n");
- FAIL = 1;
} else
printf("Response Success\n");
+ pthread_rwlock_wrlock(&gl_lock);
gl_rcvdmsginfo.rsp_reqd = 0;
+ pthread_rwlock_unlock(&gl_lock);
}
}
free(mesg);
- test_validate(FAIL, 0);
}
void tet_adest_rcvr_svc_thread()
{
MDS_SVC_ID svc_id;
+ bool rsp_reqd = false;
char tmp[] = " Hi Sender! My Name is RECEIVER ";
TET_MDS_MSG *mesg;
mesg = (TET_MDS_MSG *)malloc(sizeof(TET_MDS_MSG));
@@ -4695,14 +4724,19 @@ void tet_adest_rcvr_svc_thread()
printf("Fail mds_service_retrieve\n");
}
/*after that send a response to the sender, if it expects*/
- if (gl_rcvdmsginfo.rsp_reqd) {
+ pthread_rwlock_rdlock(&gl_lock);
+ rsp_reqd = gl_rcvdmsginfo.rsp_reqd;
+ pthread_rwlock_unlock(&gl_lock);
+ if (rsp_reqd) {
if (mds_send_response(gl_tet_adest.mds_pwe1_hdl,
NCSMDS_SVC_ID_EXTERNAL_MIN,
mesg) != NCSCC_RC_SUCCESS) {
printf("Response Fail\n");
} else
printf("Response Success\n");
+ pthread_rwlock_wrlock(&gl_lock);
gl_rcvdmsginfo.rsp_reqd = 0;
+ pthread_rwlock_unlock(&gl_lock);
}
}
@@ -4712,6 +4746,7 @@ void tet_adest_rcvr_svc_thread()
void tet_vdest_rcvr_resp_thread()
{
MDS_SVC_ID svc_id;
+ bool rsp_reqd = false;
char tmp[] = " Hi Sender! My Name is RECEIVER ";
TET_MDS_MSG *mesg;
mesg = (TET_MDS_MSG *)malloc(sizeof(TET_MDS_MSG));
@@ -4728,7 +4763,10 @@ void tet_vdest_rcvr_resp_thread()
printf("Fail mds_service_retrieve\n");
}
/*after that send a response to the sender, if it expects*/
- if (gl_rcvdmsginfo.rsp_reqd) {
+ pthread_rwlock_rdlock(&gl_lock);
+ rsp_reqd = gl_rcvdmsginfo.rsp_reqd;
+ pthread_rwlock_unlock(&gl_lock);
+ if (rsp_reqd) {
if (mds_send_response(gl_tet_vdest[1].mds_pwe1_hdl,
NCSMDS_SVC_ID_EXTERNAL_MIN,
mesg) != NCSCC_RC_SUCCESS) {
@@ -5571,6 +5609,7 @@ TODO: Check this testcase, it was outcomment already in the "tet"-files
void tet_vdest_rcvr_thread()
{
MDS_SVC_ID svc_id;
+ bool rsp_reqd = false;
char tmp[] = " Yes Sender! I am in. Message Delivered?";
TET_MDS_MSG *mesg;
mesg = (TET_MDS_MSG *)malloc(sizeof(TET_MDS_MSG));
@@ -5588,14 +5627,19 @@ void tet_vdest_rcvr_thread()
}
/*after that send a response to the sender, if it expects*/
- if (gl_rcvdmsginfo.rsp_reqd) {
+ pthread_rwlock_rdlock(&gl_lock);
+ rsp_reqd = gl_rcvdmsginfo.rsp_reqd;
+ pthread_rwlock_unlock(&gl_lock);
+ if (rsp_reqd) {
if (mds_sendrsp_getack(gl_tet_vdest[1].mds_pwe1_hdl,
NCSMDS_SVC_ID_EXTERNAL_MIN, 0,
mesg) != NCSCC_RC_SUCCESS) {
printf("Response Fail\n");
} else
printf("Response Ack Success\n");
+ pthread_rwlock_wrlock(&gl_lock);
gl_rcvdmsginfo.rsp_reqd = 0;
+ pthread_rwlock_unlock(&gl_lock);
/*if(mds_send_redrsp_getack(gl_tet_vdest[1].mds_pwe1_hdl,
2000,300)!=NCSCC_RC_SUCCESS)
{ printf("Response Fail\n");FAIL=1; }
@@ -5610,6 +5654,7 @@ void tet_vdest_rcvr_thread()
void tet_Dadest_all_rcvr_thread()
{
MDS_SVC_ID svc_id;
+ bool rsp_reqd = false;
char tmp[] = " Hi Sender! My Name is RECEIVER ";
TET_MDS_MSG *mesg;
mesg = (TET_MDS_MSG *)malloc(sizeof(TET_MDS_MSG));
@@ -5629,8 +5674,10 @@ void tet_Dadest_all_rcvr_thread()
}
/*after that send a response to the sender, if it expects*/
- if (gl_direct_rcvmsginfo.rsp_reqd) {
- printf("i am here\n");
+ pthread_rwlock_rdlock(&gl_lock);
+ rsp_reqd = gl_direct_rcvmsginfo.rsp_reqd;
+ pthread_rwlock_unlock(&gl_lock);
+ if (rsp_reqd) {
if (vdest_change_role(200, V_DEST_RL_STANDBY) !=
NCSCC_RC_SUCCESS) {
printf("\nFail\n");
@@ -5641,7 +5688,9 @@ void tet_Dadest_all_rcvr_thread()
MDS_SENDTYPE_RSP, 0) != NCSCC_RC_SUCCESS) {
printf("\nFail\n");
}
+ pthread_rwlock_wrlock(&gl_lock);
gl_rcvdmsginfo.rsp_reqd = 0;
+ pthread_rwlock_unlock(&gl_lock);
}
}
fflush(stdout);
@@ -5652,7 +5701,7 @@ void tet_Dadest_all_rcvr_thread()
void tet_Dadest_all_chgrole_rcvr_thread()
{
MDS_SVC_ID svc_id;
-
+ bool rsp_reqd = false;
printf("\nInside CHG ROLE ADEST direct Receiver Thread\n");
fflush(stdout);
if ((svc_id = is_adest_sel_obj_found(1))) {
@@ -5665,7 +5714,10 @@ void tet_Dadest_all_chgrole_rcvr_thread()
printf("\nFail mds_service_retrieve\n");
}
/*after that send a response to the sender, if it expects*/
- if (gl_direct_rcvmsginfo.rsp_reqd) {
+ pthread_rwlock_rdlock(&gl_lock);
+ rsp_reqd = gl_direct_rcvmsginfo.rsp_reqd;
+ pthread_rwlock_unlock(&gl_lock);
+ if (rsp_reqd) {
if (vdest_change_role(200, V_DEST_RL_STANDBY) !=
NCSCC_RC_SUCCESS) {
printf("\nFail\n");
@@ -5682,8 +5734,9 @@ void tet_Dadest_all_chgrole_rcvr_thread()
NCSCC_RC_SUCCESS) {
printf("\nFail\n");
}
-
+ pthread_rwlock_wrlock(&gl_lock);
gl_rcvdmsginfo.rsp_reqd = 0;
+ pthread_rwlock_unlock(&gl_lock);
}
}
fflush(stdout);
@@ -5692,6 +5745,7 @@ void tet_Dadest_all_chgrole_rcvr_thread()
void tet_adest_all_chgrole_rcvr_thread()
{
MDS_SVC_ID svc_id;
+ bool rsp_reqd = false;
char tmp[] = " Hi Sender! My Name is RECEIVER ";
TET_MDS_MSG *mesg;
mesg = (TET_MDS_MSG *)malloc(sizeof(TET_MDS_MSG));
@@ -5712,7 +5766,10 @@ void tet_adest_all_chgrole_rcvr_thread()
} else {
/*after that send a response to the sender, if it
* expects*/
- if (gl_rcvdmsginfo.rsp_reqd) {
+ pthread_rwlock_rdlock(&gl_lock);
+ rsp_reqd = gl_rcvdmsginfo.rsp_reqd;
+ pthread_rwlock_unlock(&gl_lock);
+ if (rsp_reqd) {
if (vdest_change_role(200, V_DEST_RL_STANDBY) !=
NCSCC_RC_SUCCESS) {
printf("\nFail\n");
@@ -5729,8 +5786,9 @@ void tet_adest_all_chgrole_rcvr_thread()
NCSCC_RC_SUCCESS) {
printf("\nFail\n");
}
-
+ pthread_rwlock_wrlock(&gl_lock);
gl_rcvdmsginfo.rsp_reqd = 0;
+ pthread_rwlock_unlock(&gl_lock);
}
}
}
@@ -5740,6 +5798,7 @@ void tet_adest_all_chgrole_rcvr_thread()
void tet_vdest_all_rcvr_thread()
{
MDS_SVC_ID svc_id;
+ bool rsp_reqd = false;
char tmp[] = " Hi Sender! My Name is RECEIVER ";
TET_MDS_MSG *mesg;
mesg = (TET_MDS_MSG *)malloc(sizeof(TET_MDS_MSG));
@@ -5764,13 +5823,18 @@ void tet_vdest_all_rcvr_thread()
printf("\nFail\n");
}
/*after that send a response to the sender, if it expects*/
- if (gl_rcvdmsginfo.rsp_reqd) {
+ pthread_rwlock_rdlock(&gl_lock);
+ rsp_reqd = gl_rcvdmsginfo.rsp_reqd;
+ pthread_rwlock_unlock(&gl_lock);
+ if (rsp_reqd) {
if (mds_send_response(gl_tet_vdest[1].mds_pwe1_hdl,
NCSMDS_SVC_ID_EXTERNAL_MIN,
mesg) != NCSCC_RC_SUCCESS) {
printf("\nFail\n");
}
+ pthread_rwlock_wrlock(&gl_lock);
gl_rcvdmsginfo.rsp_reqd = 0;
+ pthread_rwlock_unlock(&gl_lock);
}
}
fflush(stdout);
@@ -5779,6 +5843,7 @@ void tet_vdest_all_rcvr_thread()
void tet_adest_all_rcvrack_thread()
{
MDS_SVC_ID svc_id;
+ bool rsp_reqd = false;
uint32_t rs;
char tmp[] = " Hi Sender! My Name is RECEIVER ";
TET_MDS_MSG *mesg;
@@ -5798,7 +5863,10 @@ void tet_adest_all_rcvrack_thread()
printf("\nFailmds_service_retrieve \n");
}
/*after that send a response to the sender, if it expects*/
- if (gl_rcvdmsginfo.rsp_reqd) {
+ pthread_rwlock_rdlock(&gl_lock);
+ rsp_reqd = gl_rcvdmsginfo.rsp_reqd;
+ pthread_rwlock_unlock(&gl_lock);
+ if (rsp_reqd) {
if (vdest_change_role(200, V_DEST_RL_STANDBY) !=
NCSCC_RC_SUCCESS) {
printf("\nFail\n");
@@ -5809,7 +5877,9 @@ void tet_adest_all_rcvrack_thread()
mesg);
printf("\nResponse code is %d", rs);
+ pthread_rwlock_wrlock(&gl_lock);
gl_rcvdmsginfo.rsp_reqd = 0;
+ pthread_rwlock_unlock(&gl_lock);
}
}
fflush(stdout);
@@ -5819,6 +5889,7 @@ void tet_adest_all_rcvrack_thread()
void tet_adest_all_rcvrack_chgrole_thread()
{
MDS_SVC_ID svc_id;
+ bool rsp_reqd = false;
uint32_t rs;
char tmp[] = " Hi Sender! My Name is RECEIVER ";
TET_MDS_MSG *mesg;
@@ -5838,7 +5909,10 @@ void tet_adest_all_rcvrack_chgrole_thread()
printf("\nFail mds_service_retrieve\n");
}
/*after that send a response to the sender, if it expects*/
- if (gl_rcvdmsginfo.rsp_reqd) {
+ pthread_rwlock_rdlock(&gl_lock);
+ rsp_reqd = gl_rcvdmsginfo.rsp_reqd;
+ pthread_rwlock_unlock(&gl_lock);
+ if (rsp_reqd) {
if (vdest_change_role(200, V_DEST_RL_STANDBY) !=
NCSCC_RC_SUCCESS) {
printf("\nFail\n");
@@ -5860,7 +5934,9 @@ void tet_adest_all_rcvrack_chgrole_thread()
fflush(stdout);
printf("\nResponse code is %d", rs);
+ pthread_rwlock_wrlock(&gl_lock);
gl_rcvdmsginfo.rsp_reqd = 0;
+ pthread_rwlock_unlock(&gl_lock);
}
}
fflush(stdout);
@@ -5870,6 +5946,7 @@ void tet_adest_all_rcvrack_chgrole_thread()
void tet_Dadest_all_rcvrack_chgrole_thread()
{
MDS_SVC_ID svc_id;
+ bool rsp_reqd = false;
uint32_t rs = 0;
printf("\nInside Receiver Thread\n");
@@ -5883,7 +5960,10 @@ void tet_Dadest_all_rcvrack_chgrole_thread()
printf("\nFail mds_service_retrieve\n");
}
/*after that send a response to the sender, if it expects*/
- if (gl_direct_rcvmsginfo.rsp_reqd) {
+ pthread_rwlock_rdlock(&gl_lock);
+ rsp_reqd = gl_direct_rcvmsginfo.rsp_reqd;
+ pthread_rwlock_unlock(&gl_lock);
+ if (rsp_reqd) {
if (vdest_change_role(200, V_DEST_RL_STANDBY) !=
NCSCC_RC_SUCCESS) {
printf("\nFail\n");
@@ -5911,7 +5991,9 @@ void tet_Dadest_all_rcvrack_chgrole_thread()
fflush(stdout);
printf("\nResponse code is %d", rs);
+ pthread_rwlock_wrlock(&gl_lock);
gl_rcvdmsginfo.rsp_reqd = 0;
+ pthread_rwlock_unlock(&gl_lock);
}
}
fflush(stdout);
@@ -5932,6 +6014,7 @@ void tet_change_role_thread()
void tet_adest_all_rcvr_thread()
{
MDS_SVC_ID svc_id;
+ bool rsp_reqd = false;
char tmp[] = " Hi Sender! My Name is RECEIVER ";
TET_MDS_MSG *mesg;
mesg = (TET_MDS_MSG *)malloc(sizeof(TET_MDS_MSG));
@@ -5950,7 +6033,10 @@ void tet_adest_all_rcvr_thread()
printf("\nFail mds_service_retrieve\n");
}
/*after that send a response to the sender, if it expects*/
- if (gl_rcvdmsginfo.rsp_reqd) {
+ pthread_rwlock_rdlock(&gl_lock);
+ rsp_reqd = gl_rcvdmsginfo.rsp_reqd;
+ pthread_rwlock_unlock(&gl_lock);
+ if (rsp_reqd) {
if (vdest_change_role(200, V_DEST_RL_STANDBY) !=
NCSCC_RC_SUCCESS) {
printf("\nFail\n");
@@ -5960,7 +6046,9 @@ void tet_adest_all_rcvr_thread()
mesg) != NCSCC_RC_SUCCESS) {
printf("\nFail\n");
}
+ pthread_rwlock_wrlock(&gl_lock);
gl_rcvdmsginfo.rsp_reqd = 0;
+ pthread_rwlock_unlock(&gl_lock);
}
}
fflush(stdout);
@@ -6985,9 +7073,11 @@ void tet_vdest_Srcvr_thread()
printf("\nFail mds_service_retrieve\n");
}
/*after that send a response to the sender, if it expects*/
+ pthread_rwlock_wrlock(&gl_lock);
if (gl_rcvdmsginfo.rsp_reqd) {
gl_rcvdmsginfo.rsp_reqd = 0;
}
+ pthread_rwlock_unlock(&gl_lock);
/*
if(mds_send_redrsp_getack(gl_tet_vdest[0].mds_pwe1_hdl,2000,
300)!=NCSCC_RC_SUCCESS)
{ printf("Response Ack Fail\n");FAIL=1; }
@@ -10189,6 +10279,7 @@ void tet_direct_send_ack_tp_13()
void tet_Dadest_rcvr_thread()
{
MDS_SVC_ID svc_id;
+ bool rsp_reqd = false;
printf("\nInside Receiver Thread\n");
fflush(stdout);
if ((svc_id = is_adest_sel_obj_found(3))) {
@@ -10198,7 +10289,10 @@ void tet_Dadest_rcvr_thread()
SA_DISPATCH_ALL) == NCSCC_RC_SUCCESS) {
/*after that send a response to the sender, if it
* expects*/
- if (gl_direct_rcvmsginfo.rsp_reqd) {
+ pthread_rwlock_rdlock(&gl_lock);
+ rsp_reqd = gl_direct_rcvmsginfo.rsp_reqd;
+ pthread_rwlock_unlock(&gl_lock);
+ if (rsp_reqd) {
if (mds_direct_response(
gl_tet_adest.pwe[0].mds_pwe_hdl,
NCSMDS_SVC_ID_EXTERNAL_MIN, 1,
@@ -10217,6 +10311,7 @@ void tet_Dadest_rcvr_thread()
void tet_Dvdest_rcvr_thread()
{
MDS_SVC_ID svc_id;
+ bool rsp_reqd = false;
printf("\nInside Receiver Thread\n");
fflush(stdout);
if ((svc_id = is_vdest_sel_obj_found(1, 1))) {
@@ -10226,7 +10321,10 @@ void tet_Dvdest_rcvr_thread()
printf("\nFail mds_service_retrieve\n");
}
/*after that send a response to the sender, if it expects*/
- if (gl_direct_rcvmsginfo.rsp_reqd) {
+ pthread_rwlock_rdlock(&gl_lock);
+ rsp_reqd = gl_direct_rcvmsginfo.rsp_reqd;
+ pthread_rwlock_unlock(&gl_lock);
+ if (rsp_reqd) {
if (mds_direct_response(gl_tet_vdest[1].mds_pwe1_hdl,
NCSMDS_SVC_ID_EXTERNAL_MIN, 1,
MDS_SENDTYPE_SNDRACK,
@@ -10241,6 +10339,7 @@ void tet_Dvdest_rcvr_thread()
void tet_Dvdest_rcvr_all_rack_thread()
{
MDS_SVC_ID svc_id;
+ bool rsp_reqd = false;
printf("\nInside Receiver Thread\n");
fflush(stdout);
if ((svc_id = is_vdest_sel_obj_found(1, 1))) {
@@ -10250,7 +10349,10 @@ void tet_Dvdest_rcvr_all_rack_thread()
printf("Fail mds_service_retrieve\n");
}
/*after that send a response to the sender, if it expects*/
- if (gl_direct_rcvmsginfo.rsp_reqd) {
+ pthread_rwlock_rdlock(&gl_lock);
+ rsp_reqd = gl_direct_rcvmsginfo.rsp_reqd;
+ pthread_rwlock_unlock(&gl_lock);
+ if (rsp_reqd) {
if (mds_direct_response(
gl_tet_vdest[1].mds_pwe1_hdl,
NCSMDS_SVC_ID_EXTERNAL_MIN, gl_set_msg_fmt_ver,
@@ -10266,6 +10368,7 @@ void tet_Dvdest_rcvr_all_rack_thread()
void tet_Dvdest_rcvr_all_thread()
{
MDS_SVC_ID svc_id;
+ bool rsp_reqd = false;
printf("\nInside Receiver Thread\n");
fflush(stdout);
if ((svc_id = is_vdest_sel_obj_found(1, 1))) {
@@ -10275,7 +10378,10 @@ void tet_Dvdest_rcvr_all_thread()
printf("Fail mds_service_retrieve\n");
}
/*after that send a response to the sender, if it expects*/
- if (gl_direct_rcvmsginfo.rsp_reqd) {
+ pthread_rwlock_rdlock(&gl_lock);
+ rsp_reqd = gl_direct_rcvmsginfo.rsp_reqd;
+ pthread_rwlock_unlock(&gl_lock);
+ if (rsp_reqd) {
if (mds_direct_response(
gl_tet_vdest[1].mds_pwe1_hdl,
NCSMDS_SVC_ID_EXTERNAL_MIN, gl_set_msg_fmt_ver,
@@ -10290,6 +10396,7 @@ void tet_Dvdest_rcvr_all_thread()
void tet_Dvdest_rcvr_all_chg_role_thread()
{
MDS_SVC_ID svc_id;
+ bool rsp_reqd = false;
printf("\nInside Receiver Thread\n");
fflush(stdout);
sleep(10);
@@ -10304,7 +10411,10 @@ void tet_Dvdest_rcvr_all_chg_role_thread()
printf("Fail mds_service_retrieve\n");
}
/*after that send a response to the sender, if it expects*/
- if (gl_direct_rcvmsginfo.rsp_reqd) {
+ pthread_rwlock_rdlock(&gl_lock);
+ rsp_reqd = gl_direct_rcvmsginfo.rsp_reqd;
+ pthread_rwlock_unlock(&gl_lock);
+ if (rsp_reqd) {
if (mds_direct_response(
gl_tet_vdest[1].mds_pwe1_hdl,
NCSMDS_SVC_ID_EXTERNAL_MIN, gl_set_msg_fmt_ver,
@@ -10320,6 +10430,7 @@ void tet_Dvdest_rcvr_all_chg_role_thread()
void tet_Dvdest_Srcvr_thread()
{
MDS_SVC_ID svc_id;
+ bool rsp_reqd = false;
printf("\nInside Receiver Thread\n");
fflush(stdout);
if ((svc_id = is_vdest_sel_obj_found(0, 0))) {
@@ -10329,7 +10440,10 @@ void tet_Dvdest_Srcvr_thread()
printf("Fail mds_service_retrieve\n");
}
/*after that send a response to the sender, if it expects*/
- if (gl_direct_rcvmsginfo.rsp_reqd) {
+ pthread_rwlock_rdlock(&gl_lock);
+ rsp_reqd = gl_direct_rcvmsginfo.rsp_reqd;
+ pthread_rwlock_unlock(&gl_lock);
+ if (rsp_reqd) {
if (mds_direct_response(gl_tet_vdest[0].mds_pwe1_hdl,
NCSMDS_SVC_ID_EXTERNAL_MIN, 1,
MDS_SENDTYPE_RRSP,
@@ -10344,6 +10458,7 @@ void tet_Dvdest_Srcvr_thread()
void tet_Dvdest_Srcvr_all_thread()
{
MDS_SVC_ID svc_id;
+ bool rsp_reqd = false;
printf("\nInside Receiver Thread\n");
fflush(stdout);
if ((svc_id = is_vdest_sel_obj_found(0, 0))) {
@@ -10353,7 +10468,10 @@ void tet_Dvdest_Srcvr_all_thread()
printf("Fail mds_service_retrieve\n");
}
/*after that send a response to the sender, if it expects*/
- if (gl_direct_rcvmsginfo.rsp_reqd) {
+ pthread_rwlock_rdlock(&gl_lock);
+ rsp_reqd = gl_direct_rcvmsginfo.rsp_reqd;
+ pthread_rwlock_unlock(&gl_lock);
+ if (rsp_reqd) {
if (mds_direct_response(
gl_tet_vdest[0].mds_pwe1_hdl,
NCSMDS_SVC_ID_EXTERNAL_MIN, gl_set_msg_fmt_ver,
@@ -10369,6 +10487,7 @@ void tet_Dvdest_Srcvr_all_thread()
void tet_Dvdest_Arcvr_thread()
{
MDS_SVC_ID svc_id;
+ bool rsp_reqd = false;
printf("\nInside Receiver Thread\n");
fflush(stdout);
if ((svc_id = is_vdest_sel_obj_found(0, 0))) {
@@ -10378,7 +10497,10 @@ void tet_Dvdest_Arcvr_thread()
printf("Fail mds_service_retrieve\n");
}
/*after that send a response to the sender, if it expects*/
- if (gl_direct_rcvmsginfo.rsp_reqd) {
+ pthread_rwlock_rdlock(&gl_lock);
+ rsp_reqd = gl_direct_rcvmsginfo.rsp_reqd;
+ pthread_rwlock_unlock(&gl_lock);
+ if (rsp_reqd) {
if (mds_direct_response(gl_tet_vdest[0].mds_pwe1_hdl,
NCSMDS_SVC_ID_EXTERNAL_MIN, 1,
MDS_SENDTYPE_REDRACK,
@@ -10394,6 +10516,7 @@ void tet_Dvdest_Arcvr_thread()
void tet_Dvdest_Arcvr_all_thread()
{
MDS_SVC_ID svc_id;
+ bool rsp_reqd = false;
printf("\nInside Receiver Thread\n");
fflush(stdout);
if ((svc_id = is_vdest_sel_obj_found(0, 0))) {
@@ -10403,7 +10526,10 @@ void tet_Dvdest_Arcvr_all_thread()
printf("Fail mds_service_retrieve\n");
}
/*after that send a response to the sender, if it expects*/
- if (gl_direct_rcvmsginfo.rsp_reqd) {
+ pthread_rwlock_rdlock(&gl_lock);
+ rsp_reqd = gl_direct_rcvmsginfo.rsp_reqd;
+ pthread_rwlock_unlock(&gl_lock);
+ if (rsp_reqd) {
if (mds_direct_response(
gl_tet_vdest[0].mds_pwe1_hdl,
NCSMDS_SVC_ID_EXTERNAL_MIN, gl_set_msg_fmt_ver,
diff --git a/src/mds/apitest/mdstipc_conf.c b/src/mds/apitest/mdstipc_conf.c
index 2b28c66..e54d8d5 100644
--- a/src/mds/apitest/mdstipc_conf.c
+++ b/src/mds/apitest/mdstipc_conf.c
@@ -23,15 +23,24 @@
#include "base/osaf_poll.h"
extern int fill_syncparameters(int);
extern uint32_t mds_vdest_tbl_get_role(MDS_VDEST_ID vdest_id, V_DEST_RL *role);
+extern pthread_mutex_t gl_mds_library_mutex;
+
+extern pthread_mutex_t safe_printf_mutex;
+extern void safe_printf(const char* format, ... );
+extern int safe_fflush(FILE *stream);
+#define printf safe_printf
+#define fflush safe_fflush
+
/****************** ADEST WRAPPERS ***********************/
uint32_t adest_get_handle(void)
{
- memset(&ada_info, '\0', sizeof(ada_info));
- memset(&gl_tet_adest, '\0', sizeof(gl_tet_adest));
+ NCSADA_INFO ada_info;
ada_info.req = NCSADA_GET_HDLS;
if (ncsada_api(&ada_info) == NCSCC_RC_SUCCESS) {
+ pthread_rwlock_wrlock(&gl_lock);
+ memset(&gl_tet_adest, '\0', sizeof(gl_tet_adest));
gl_tet_adest.adest = ada_info.info.adest_get_hdls.o_adest;
printf("\nADEST <%llx > : GET_HDLS is SUCCESSFUL",
@@ -41,6 +50,7 @@ uint32_t adest_get_handle(void)
ada_info.info.adest_get_hdls.o_mds_pwe1_hdl;
gl_tet_adest.mds_adest_hdl =
ada_info.info.adest_get_hdls.o_mds_adest_hdl;
+ pthread_rwlock_unlock(&gl_lock);
return NCSCC_RC_SUCCESS;
} else {
printf("\nRequest to ncsada_api: GET_HDLS has FAILED");
@@ -50,7 +60,7 @@ uint32_t adest_get_handle(void)
uint32_t create_pwe_on_adest(MDS_HDL mds_adest_hdl, PW_ENV_ID pwe_id)
{
- memset(&ada_info, '\0', sizeof(ada_info));
+ NCSADA_INFO ada_info;
ada_info.req = NCSADA_PWE_CREATE;
ada_info.info.pwe_create.i_mds_adest_hdl = mds_adest_hdl;
@@ -59,10 +69,12 @@ uint32_t create_pwe_on_adest(MDS_HDL mds_adest_hdl, PW_ENV_ID pwe_id)
if (ncsada_api(&ada_info) == NCSCC_RC_SUCCESS) {
printf("\nPWE_CREATE is SUCCESSFUL : PWE = %d", pwe_id);
+ pthread_rwlock_wrlock(&gl_lock);
gl_tet_adest.pwe[gl_tet_adest.pwe_count].pwe_id = pwe_id;
gl_tet_adest.pwe[gl_tet_adest.pwe_count].mds_pwe_hdl =
ada_info.info.pwe_create.o_mds_pwe_hdl;
gl_tet_adest.pwe_count++;
+ pthread_rwlock_unlock(&gl_lock);
return NCSCC_RC_SUCCESS;
} else {
printf("\nRequest to PWE_CREATE on ADEST has FAILED\n");
@@ -72,14 +84,16 @@ uint32_t create_pwe_on_adest(MDS_HDL mds_adest_hdl, PW_ENV_ID pwe_id)
uint32_t destroy_pwe_on_adest(MDS_HDL mds_pwe_hdl)
{
- memset(&ada_info, '\0', sizeof(ada_info));
+ NCSADA_INFO ada_info;
ada_info.req = NCSADA_PWE_DESTROY;
ada_info.info.pwe_destroy.i_mds_pwe_hdl = mds_pwe_hdl;
if (ncsada_api(&ada_info) == NCSCC_RC_SUCCESS) {
printf("\nADEST: PWE_DESTROY is SUCCESSFUL");
+ pthread_rwlock_wrlock(&gl_lock);
gl_tet_adest.pwe_count--;
+ pthread_rwlock_unlock(&gl_lock);
return NCSCC_RC_SUCCESS;
} else {
printf("\nRequest tO PWE_DESTROY on ADEST: has FAILED");
@@ -91,26 +105,28 @@ uint32_t destroy_pwe_on_adest(MDS_HDL mds_pwe_hdl)
uint32_t create_vdest(NCS_VDEST_TYPE policy, MDS_DEST vdest)
{
- memset(&vda_info, '\0', sizeof(vda_info));
- memset(&gl_tet_vdest[gl_vdest_indx], '\0', sizeof(TET_VDEST));
+ NCSVDA_INFO vda_info;
vda_info.req = NCSVDA_VDEST_CREATE;
vda_info.info.vdest_create.i_policy = policy;
vda_info.info.vdest_create.i_create_type = NCSVDA_VDEST_CREATE_SPECIFIC;
- vda_info.info.vdest_create.info.specified.i_vdest =
- gl_tet_vdest[gl_vdest_indx].vdest = vdest;
+ vda_info.info.vdest_create.info.specified.i_vdest = vdest;
if (ncsvda_api(&vda_info) == NCSCC_RC_SUCCESS) {
printf("\n %lld : VDEST_CREATE is SUCCESSFUL",
(long long unsigned int)vdest);
fflush(stdout);
+ pthread_rwlock_wrlock(&gl_lock);
+ memset(&gl_tet_vdest[gl_vdest_indx], '\0', sizeof(TET_VDEST));
+ gl_tet_vdest[gl_vdest_indx].vdest = vdest;
gl_tet_vdest[gl_vdest_indx].mds_pwe1_hdl =
vda_info.info.vdest_create.o_mds_pwe1_hdl;
gl_tet_vdest[gl_vdest_indx].mds_vdest_hdl =
vda_info.info.vdest_create.o_mds_vdest_hdl;
gl_vdest_indx++;
+ pthread_rwlock_unlock(&gl_lock);
return NCSCC_RC_SUCCESS;
} else {
printf("\nRequest to ncsvda_api: VDEST_CREATE has FAILED\n");
@@ -119,7 +135,7 @@ uint32_t create_vdest(NCS_VDEST_TYPE policy, MDS_DEST vdest)
}
uint32_t destroy_vdest(MDS_DEST vdest)
{
- memset(&vda_info, '\0', sizeof(vda_info)); /*zeroizing*/
+ NCSVDA_INFO vda_info;
/*request*/
vda_info.req = NCSVDA_VDEST_DESTROY;
@@ -131,8 +147,10 @@ uint32_t destroy_vdest(MDS_DEST vdest)
printf("\n %lld : VDEST_DESTROY is SUCCESSFULL",
(long long unsigned int)vdest);
fflush(stdout);
+ pthread_rwlock_wrlock(&gl_lock);
memset(&gl_tet_vdest[gl_vdest_indx], '\0', sizeof(TET_VDEST));
gl_vdest_indx--;
+ pthread_rwlock_unlock(&gl_lock);
return NCSCC_RC_SUCCESS;
} else {
printf("\nRequest to ncsvda_api: VDEST_DESTROY has FAILED\n");
@@ -142,8 +160,7 @@ uint32_t destroy_vdest(MDS_DEST vdest)
uint32_t create_named_vdest(bool persistent, NCS_VDEST_TYPE policy, char *vname)
{
- memset(&vda_info, '\0', sizeof(vda_info));
- memset(&gl_tet_vdest[gl_vdest_indx], '\0', sizeof(TET_VDEST));
+ NCSVDA_INFO vda_info;
vda_info.req = NCSVDA_VDEST_CREATE;
@@ -169,6 +186,8 @@ uint32_t create_named_vdest(bool persistent, NCS_VDEST_TYPE policy, char *vname)
if (ncsvda_api(&vda_info) == NCSCC_RC_SUCCESS) {
printf("\nNAMED VDEST_CREATE is SUCCESSFULL\n");
+ pthread_rwlock_wrlock(&gl_lock);
+ memset(&gl_tet_vdest[gl_vdest_indx], '\0', sizeof(TET_VDEST));
gl_tet_vdest[gl_vdest_indx].vdest =
vda_info.info.vdest_create.info.named.o_vdest;
gl_tet_vdest[gl_vdest_indx].mds_pwe1_hdl =
@@ -176,6 +195,7 @@ uint32_t create_named_vdest(bool persistent, NCS_VDEST_TYPE policy, char *vname)
gl_tet_vdest[gl_vdest_indx].mds_vdest_hdl =
vda_info.info.vdest_create.o_mds_vdest_hdl;
gl_vdest_indx++;
+ pthread_rwlock_unlock(&gl_lock);
return NCSCC_RC_SUCCESS;
} else {
printf("\nNAMED VDEST_CREATE has FAILED\n");
@@ -185,7 +205,7 @@ uint32_t create_named_vdest(bool persistent, NCS_VDEST_TYPE policy, char *vname)
uint32_t destroy_named_vdest(bool non_persistent, MDS_DEST vdest, char *vname)
{
- memset(&vda_info, '\0', sizeof(vda_info));
+ NCSVDA_INFO vda_info;
vda_info.req = NCSVDA_VDEST_DESTROY;
@@ -201,8 +221,10 @@ uint32_t destroy_named_vdest(bool non_persistent, MDS_DEST vdest, char *vname)
if (ncsvda_api(&vda_info) == NCSCC_RC_SUCCESS) {
printf("\n %lld : VDEST_NAMED DESTROY is SUCCESSFULL\n",
(long long unsigned int)vdest);
+ pthread_rwlock_wrlock(&gl_lock);
memset(&gl_tet_vdest[gl_vdest_indx], '\0', sizeof(TET_VDEST));
gl_vdest_indx--;
+ pthread_rwlock_unlock(&gl_lock);
return NCSCC_RC_SUCCESS;
} else {
printf(
@@ -213,7 +235,7 @@ uint32_t destroy_named_vdest(bool non_persistent, MDS_DEST vdest, char *vname)
MDS_DEST vdest_lookup(char *vname)
{
- memset(&vda_info, '\0', sizeof(vda_info));
+ NCSVDA_INFO vda_info;
vda_info.req = NCSVDA_VDEST_LOOKUP;
@@ -241,7 +263,7 @@ MDS_DEST vdest_lookup(char *vname)
uint32_t vdest_change_role(MDS_DEST vdest, V_DEST_RL new_role)
{
- memset(&vda_info, '\0', sizeof(vda_info));
+ NCSVDA_INFO vda_info;
vda_info.req = NCSVDA_VDEST_CHG_ROLE;
@@ -251,10 +273,14 @@ uint32_t vdest_change_role(MDS_DEST vdest, V_DEST_RL new_role)
if (ncsvda_api(&vda_info) == NCSCC_RC_SUCCESS) {
/*Making sure vdest change role done*/
V_DEST_RL role = 0;
+ pthread_mutex_lock(&gl_mds_library_mutex);
mds_vdest_tbl_get_role(vdest, &role);
+ pthread_mutex_unlock(&gl_mds_library_mutex);
while (role != new_role) {
sleep(1);
+ pthread_mutex_lock(&gl_mds_library_mutex);
mds_vdest_tbl_get_role(vdest, &role);
+ pthread_mutex_unlock(&gl_mds_library_mutex);
}
printf("\nVDEST_CHANGE ROLE to %d is SUCCESSFULL", new_role);
return NCSCC_RC_SUCCESS;
@@ -265,7 +291,7 @@ uint32_t vdest_change_role(MDS_DEST vdest, V_DEST_RL new_role)
uint32_t create_pwe_on_vdest(MDS_HDL mds_vdest_hdl, PW_ENV_ID pwe_id)
{
int i;
- memset(&vda_info, '\0', sizeof(vda_info));
+ NCSVDA_INFO vda_info;
vda_info.req = NCSVDA_PWE_CREATE;
@@ -274,7 +300,7 @@ uint32_t create_pwe_on_vdest(MDS_HDL mds_vdest_hdl, PW_ENV_ID pwe_id)
if (ncsvda_api(&vda_info) == NCSCC_RC_SUCCESS) {
printf("\nVDEST_PWE CREATE PWE= %d is SUCCESSFULL", pwe_id);
-
+ pthread_rwlock_wrlock(&gl_lock);
for (i = 0; i < gl_vdest_indx; i++) {
if (gl_tet_vdest[i].mds_vdest_hdl == mds_vdest_hdl) {
gl_tet_vdest[i]
@@ -287,6 +313,7 @@ uint32_t create_pwe_on_vdest(MDS_HDL mds_vdest_hdl, PW_ENV_ID pwe_id)
gl_tet_vdest[i].pwe_count++;
}
}
+ pthread_rwlock_unlock(&gl_lock);
return NCSCC_RC_SUCCESS;
} else {
printf("\nRequest to ncsvda_api: VDEST_PWE CREATE has FAILED");
@@ -297,13 +324,14 @@ uint32_t create_pwe_on_vdest(MDS_HDL mds_vdest_hdl, PW_ENV_ID pwe_id)
uint32_t destroy_pwe_on_vdest(MDS_HDL mds_pwe_hdl)
{
int i, j;
- memset(&vda_info, '\0', sizeof(vda_info));
+ NCSVDA_INFO vda_info;
vda_info.req = NCSVDA_PWE_DESTROY;
vda_info.info.pwe_destroy.i_mds_pwe_hdl = mds_pwe_hdl;
if (ncsvda_api(&vda_info) == NCSCC_RC_SUCCESS) {
+ pthread_rwlock_wrlock(&gl_lock);
for (i = 0; i < gl_vdest_indx; i++) {
for (j = gl_tet_vdest[i].pwe_count - 1; j >= 0; j--) {
if (gl_tet_vdest[i].pwe[j].mds_pwe_hdl ==
@@ -313,6 +341,7 @@ uint32_t destroy_pwe_on_vdest(MDS_HDL mds_pwe_hdl)
}
}
}
+ pthread_rwlock_unlock(&gl_lock);
return NCSCC_RC_SUCCESS;
} else {
printf("\nRequest to ncsvda_api: VDEST_PWE DESTROY has FAILED");
@@ -402,7 +431,7 @@ uint32_t mds_service_install(MDS_HDL mds_hdl, MDS_SVC_ID svc_id,
if (ncsmds_api(&svc_to_mds_info) == NCSCC_RC_SUCCESS) {
printf("\n %d : SERVICE INSTALL is SUCCESSFULL", svc_id);
-
+ pthread_rwlock_wrlock(&gl_lock);
gl_tet_svc.dest = svc_to_mds_info.info.svc_install.o_dest;
if (m_MDS_DEST_IS_AN_ADEST(gl_tet_svc.dest) == 0) {
gl_tet_svc.anc = svc_to_mds_info.info.svc_install.o_anc;
@@ -422,6 +451,7 @@ uint32_t mds_service_install(MDS_HDL mds_hdl, MDS_SVC_ID svc_id,
gl_tet_svc;
} else
gl_tet_adest.svc[gl_tet_adest.svc_count++] = gl_tet_svc;
+ pthread_rwlock_unlock(&gl_lock);
return NCSCC_RC_SUCCESS;
} else {
printf("\nRequest to ncsmds_api: MDS INSTALL has FAILED\n");
@@ -444,7 +474,7 @@ uint32_t mds_service_uninstall(MDS_HDL mds_hdl, MDS_SVC_ID svc_id)
if (ncsmds_api(&svc_to_mds_info) == NCSCC_RC_SUCCESS) {
printf("\n %d : SERVICE UNINSTALL is SUCCESSFULL", svc_id);
-
+ pthread_rwlock_wrlock(&gl_lock);
FOUND = 0;
/*VDEST*/
if (YES_ADEST == 0) {
@@ -501,6 +531,7 @@ uint32_t mds_service_uninstall(MDS_HDL mds_hdl, MDS_SVC_ID svc_id)
}
}
}
+ pthread_rwlock_unlock(&gl_lock);
return NCSCC_RC_SUCCESS;
} else {
printf("\nRequest to ncsmds_api: MDS UNINSTALL has FAILED\n");
@@ -540,7 +571,7 @@ uint32_t mds_service_subscribe(MDS_HDL mds_hdl, MDS_SVC_ID svc_id,
if (ncsmds_api(&svc_to_mds_info) == NCSCC_RC_SUCCESS) {
printf("\n MDS SERVICE SUBSCRIBE is SUCCESSFULL");
-
+ pthread_rwlock_wrlock(&gl_lock);
FOUND = 0;
/*VDEST*/
if (YES_ADEST == 0) {
@@ -698,6 +729,7 @@ uint32_t mds_service_subscribe(MDS_HDL mds_hdl, MDS_SVC_ID svc_id,
}
}
}
+ pthread_rwlock_unlock(&gl_lock);
return NCSCC_RC_SUCCESS;
} else {
printf("\nRequest to ncsmds_api: MDS SUBSCRIBE has FAILED\n");
@@ -725,7 +757,7 @@ uint32_t mds_service_redundant_subscribe(MDS_HDL mds_hdl, MDS_SVC_ID svc_id,
if (ncsmds_api(&svc_to_mds_info) == NCSCC_RC_SUCCESS) {
printf("\n MDS RED SUBSCRIBE is SUCCESSFULL");
-
+ pthread_rwlock_wrlock(&gl_lock);
FOUND = 0;
/*VDEST*/
if (YES_ADEST == 0) {
@@ -883,7 +915,7 @@ uint32_t mds_service_redundant_subscribe(MDS_HDL mds_hdl, MDS_SVC_ID svc_id,
}
}
}
-
+ pthread_rwlock_unlock(&gl_lock);
return NCSCC_RC_SUCCESS;
} else {
printf(
@@ -906,7 +938,7 @@ uint32_t mds_service_cancel_subscription(MDS_HDL mds_hdl, MDS_SVC_ID svc_id,
if (ncsmds_api(&svc_to_mds_info) == NCSCC_RC_SUCCESS) {
printf("\n MDS CANCEL SUBSCRIBE is SUCCESSFULL");
-
+ pthread_rwlock_wrlock(&gl_lock);
FOUND = 0;
for (i = 0; i < gl_vdest_indx; i++) {
for (j = 0; j < gl_tet_vdest[i].svc_count; j++) {
@@ -947,6 +979,7 @@ uint32_t mds_service_cancel_subscription(MDS_HDL mds_hdl, MDS_SVC_ID svc_id,
break;
}
}
+ pthread_rwlock_unlock(&gl_lock);
return NCSCC_RC_SUCCESS;
} else {
printf(
@@ -1972,7 +2005,7 @@ uint32_t tet_mds_cb_rcv(NCSMDS_CALLBACK_INFO *mds_to_svc_info)
/*Now Display what message did u receive*/
printf("\nReceived Message len = %d \nThe message is=%s",
msg->recvd_len, msg->recvd_data);
-
+ pthread_rwlock_wrlock(&gl_lock);
/*Now storing the message context on a global structure*/
gl_rcvdmsginfo.yr_svc_hdl =
mds_to_svc_info->i_yr_svc_hdl; /*the decider*/
@@ -1993,7 +2026,7 @@ uint32_t tet_mds_cb_rcv(NCSMDS_CALLBACK_INFO *mds_to_svc_info)
gl_rcvdmsginfo.to_dest =
mds_to_svc_info->info.receive.i_to_dest; /*FIX THIS*/
gl_rcvdmsginfo.node_id = mds_to_svc_info->info.receive.i_node_id;
-
+ pthread_rwlock_unlock(&gl_lock);
return NCSCC_RC_SUCCESS;
}
uint32_t tet_mds_cb_direct_rcv(NCSMDS_CALLBACK_INFO *mds_to_svc_info)
@@ -2029,6 +2062,7 @@ uint32_t tet_mds_cb_direct_rcv(NCSMDS_CALLBACK_INFO *mds_to_svc_info)
mds_to_svc_info->info.direct_receive.i_direct_buff);
fflush(stdout);
/*Now storing the message context on a global structure*/
+ pthread_rwlock_wrlock(&gl_lock);
gl_direct_rcvmsginfo.yr_svc_hdl =
mds_to_svc_info->i_yr_svc_hdl; /*the decider*/
@@ -2056,6 +2090,7 @@ uint32_t tet_mds_cb_direct_rcv(NCSMDS_CALLBACK_INFO *mds_to_svc_info)
gl_direct_rcvmsginfo.msg_ctxt =
mds_to_svc_info->info.direct_receive.i_msg_ctxt;
}
+ pthread_rwlock_unlock(&gl_lock);
mds_free_direct_buff(
mds_to_svc_info->info.direct_receive.i_direct_buff);
return NCSCC_RC_SUCCESS;
@@ -2064,6 +2099,8 @@ uint32_t tet_mds_cb_direct_rcv(NCSMDS_CALLBACK_INFO *mds_to_svc_info)
uint32_t tet_mds_svc_event(NCSMDS_CALLBACK_INFO *mds_to_svc_info)
{
int i, j, k;
+ TET_EVENT_INFO gl_event_data;
+
gl_event_data.ur_svc_id = mds_to_svc_info->info.svc_evt.i_your_id;
gl_event_data.event = mds_to_svc_info->info.svc_evt.i_change;
@@ -2076,6 +2113,7 @@ uint32_t tet_mds_svc_event(NCSMDS_CALLBACK_INFO *mds_to_svc_info)
gl_event_data.rem_svc_pvt_ver =
mds_to_svc_info->info.svc_evt.i_rem_svc_pvt_ver;
gl_event_data.svc_pwe_hdl = mds_to_svc_info->info.svc_evt.svc_pwe_hdl;
+ pthread_rwlock_wrlock(&gl_lock);
if (is_service_on_adest(gl_event_data.svc_pwe_hdl,
gl_event_data.ur_svc_id) == NCSCC_RC_SUCCESS) {
printf("\nThe Subscriber Service id = %d is on ADEST",
@@ -2120,6 +2158,7 @@ uint32_t tet_mds_svc_event(NCSMDS_CALLBACK_INFO *mds_to_svc_info)
}
}
}
+ pthread_rwlock_unlock(&gl_lock);
/*fill in the event info of this service*/
/* If this service is installed with MDS queue ownership model hen use
MDS_RETREIVE */
--
2.7.4