src/lck/Makefile.am | 8 +- src/lck/agent/gla_api.c | 96 +++++++++++++ src/lck/agent/gla_cb.h | 4 + src/lck/agent/gla_evt.h | 11 +- src/lck/agent/gla_init.c | 26 +++ src/lck/agent/gla_mds.c | 55 +++++++ src/lck/common/glsv_defs.h | 9 +- src/lck/lckd/gld_api.c | 27 +++- src/lck/lckd/gld_cb.h | 2 +- src/lck/lckd/gld_evt.c | 105 +++++++++----- src/lck/lckd/gld_evt.h | 16 ++ src/lck/lckd/gld_rsc.c | 8 + src/lck/lcknd/glnd_agent.c | 22 +++ src/lck/lcknd/glnd_api.c | 33 +++- src/lck/lcknd/glnd_cb.c | 10 + src/lck/lcknd/glnd_cb.h | 12 + src/lck/lcknd/glnd_ckpt.c | 32 ++++ src/lck/lcknd/glnd_client.c | 175 +++++++++++++++++++++-- src/lck/lcknd/glnd_client.h | 10 + src/lck/lcknd/glnd_evt.c | 268 ++++++++++++++++++++++++++++++------- src/lck/lcknd/glnd_mds.c | 39 +++++ src/lck/lcknd/glnd_mds.h | 8 + src/lck/lcknd/glnd_res.c | 54 ++++++- src/lck/lcknd/glnd_res_req.c | 48 ++++++ src/lck/lcknd/glnd_restart.c | 60 ++++++++- src/smf/smfd/SmfCampaignThread.cc | 16 +- tools/cluster_sim_uml/opensaf | 4 +- 27 files changed, 1014 insertions(+), 144 deletions(-)
smfd core dumps during commit of campaign. If an AMF SU under maintenance fails right as the campaign commit is done, there is a race condition present. Before SMF clears the suMaintenaceCampaign attribute of the SU, if the SU fails, it will send a notification. Meanwhile, the commit deletes upgrade campaign pointer inside smfd. If the deletion of the campaign pointer inside smfd occurs before it receives the NTF event a crash will occur because the campaign pointer is gone. Solution is to always process NTF events before processing the termination of the campaign. The campaign termination code sets "m_running" to false, and deletes the pointer. This should always be last in the poll loop so that the loop will exit immediately without processing any NTF events (or other future events.) diff --git a/src/lck/Makefile.am b/src/lck/Makefile.am --- a/src/lck/Makefile.am +++ b/src/lck/Makefile.am @@ -152,11 +152,13 @@ bin_osaflcknd_SOURCES = \ src/lck/lcknd/glnd_res_req.c \ src/lck/lcknd/glnd_restart.c \ src/lck/lcknd/glnd_shm.c \ - src/lck/lcknd/glnd_tmr.c + src/lck/lcknd/glnd_tmr.c \ + src/lck/lcknd/glnd_clm.cc bin_osaflcknd_LDADD = \ lib/liblck_common.la \ lib/libSaAmf.la \ + lib/libSaClm.la \ lib/libopensaf_core.la bin_osaflckd_CPPFLAGS = \ @@ -175,11 +177,13 @@ bin_osaflckd_SOURCES = \ src/lck/lckd/gld_red.c \ src/lck/lckd/gld_rsc.c \ src/lck/lckd/gld_standby.c \ - src/lck/lckd/gld_tmr.c + src/lck/lckd/gld_tmr.c \ + src/lck/lckd/gld_clm.cc bin_osaflckd_LDADD = \ lib/liblck_common.la \ lib/libSaAmf.la \ + lib/libSaClm.la \ lib/libosaf_common.la \ lib/libSaImmOi.la \ lib/libSaImmOm.la \ diff --git a/src/lck/agent/gla_api.c b/src/lck/agent/gla_api.c --- a/src/lck/agent/gla_api.c +++ b/src/lck/agent/gla_api.c @@ -129,6 +129,9 @@ SaAisErrorT saLckInitialize(SaLckHandleT } rc = out_evt->error; if (rc == SA_AIS_OK) { + /* if the call succeeds we know glnd is a member of the cluster */ + gla_cb->isClusterMember = true; + /* create the client node and populate it */ 
client_info = gla_client_tree_find_and_add(gla_cb, out_evt->handle, true); if (client_info == NULL) { @@ -137,6 +140,9 @@ SaAisErrorT saLckInitialize(SaLckHandleT goto err; } + client_info->isStale = false; + memcpy(&client_info->version, version, sizeof(SaVersionT)); + /* copy the callbacks */ if (lckCallbacks) memcpy((void *)&client_info->lckCallbk, (void *)lckCallbacks, sizeof(SaLckCallbacksT)); @@ -237,6 +243,14 @@ SaAisErrorT saLckSelectionObjectGet(SaLc goto done; } + /* are we a member of the cluster? */ + if (m_GLA_VER_IS_AT_LEAST_B_3(client_info->version)) { + if (!gla_cb->isClusterMember || client_info->isStale) { + rc = SA_AIS_ERR_UNAVAILABLE; + goto done; + } + } + /* everything's fine.. pass the sel obj to the appl */ *o_sel_obj = (SaSelectionObjectT)m_GET_FD_FROM_SEL_OBJ(m_NCS_IPC_GET_SEL_OBJ(&client_info->callbk_mbx)); @@ -297,6 +311,14 @@ SaAisErrorT saLckOptionCheck(SaLckHandle goto done; } + /* are we a member of the cluster? */ + if (m_GLA_VER_IS_AT_LEAST_B_3(client_info->version)) { + if (!gla_cb->isClusterMember || client_info->isStale) { + rc = SA_AIS_ERR_UNAVAILABLE; + goto done; + } + } + /* populate the options - as this implementation support both Deadlock and orphan , set the values */ *lckOptions = SA_LCK_OPT_ORPHAN_LOCKS | SA_LCK_OPT_DEADLOCK_DETECTION; @@ -353,6 +375,14 @@ SaAisErrorT saLckDispatch(SaLckHandleT l goto done; } + /* are we a member of the cluster? */ + if (m_GLA_VER_IS_AT_LEAST_B_3(client_info->version)) { + if (!gla_cb->isClusterMember || client_info->isStale) { + rc = SA_AIS_ERR_UNAVAILABLE; + goto done; + } + } + switch (flags) { case SA_DISPATCH_ONE: rc = gla_hdl_callbk_dispatch_one(gla_cb, client_info); @@ -563,6 +593,14 @@ SaAisErrorT saLckResourceOpen(SaLckHandl goto done; } + /* are we a member of the cluster? 
*/ + if (m_GLA_VER_IS_AT_LEAST_B_3(client_info->version)) { + if (!gla_cb->isClusterMember || client_info->isStale) { + rc = SA_AIS_ERR_UNAVAILABLE; + goto done; + } + } + /* check whether GLND is up or not */ if (!gla_cb->glnd_svc_up) { ncshm_give_hdl(client_info->lcl_lock_handle_id); @@ -713,6 +751,15 @@ SaAisErrorT saLckResourceOpenAsync(SaLck rc = SA_AIS_ERR_INIT; goto done; } + + /* are we a member of the cluster? */ + if (m_GLA_VER_IS_AT_LEAST_B_3(client_info->version)) { + if (!gla_cb->isClusterMember || client_info->isStale) { + rc = SA_AIS_ERR_UNAVAILABLE; + goto done; + } + } + /* check whether GLND is up or not */ if (!gla_cb->glnd_svc_up) { ncshm_give_hdl(client_info->lcl_lock_handle_id); @@ -826,6 +873,14 @@ SaAisErrorT saLckResourceClose(SaLckReso goto done; } + /* are we a member of the cluster? */ + if (m_GLA_VER_IS_AT_LEAST_B_3(client_info->version)) { + if (!gla_cb->isClusterMember || client_info->isStale) { + rc = SA_AIS_ERR_UNAVAILABLE; + goto done; + } + } + /* check whether GLND is up or not */ if (!gla_cb->glnd_svc_up) { ncshm_give_hdl(lockResourceHandle); @@ -983,6 +1038,15 @@ SaAisErrorT saLckResourceLock(SaLckResou rc = SA_AIS_ERR_BAD_HANDLE; goto done; } + + /* are we a member of the cluster? */ + if (m_GLA_VER_IS_AT_LEAST_B_3(client_info->version)) { + if (!gla_cb->isClusterMember || client_info->isStale) { + rc = SA_AIS_ERR_UNAVAILABLE; + goto done; + } + } + /* check whether GLND is up or not */ if (!gla_cb->glnd_svc_up) { ncshm_give_hdl(lockResourceHandle); @@ -1162,6 +1226,14 @@ SaAisErrorT saLckResourceLockAsync(SaLck goto done; } + /* are we a member of the cluster? 
*/ + if (m_GLA_VER_IS_AT_LEAST_B_3(client_info->version)) { + if (!gla_cb->isClusterMember || client_info->isStale) { + rc = SA_AIS_ERR_UNAVAILABLE; + goto done; + } + } + /* check whether GLND is up or not */ if (!gla_cb->glnd_svc_up) { ncshm_give_hdl(lockResourceHandle); @@ -1307,6 +1379,14 @@ SaAisErrorT saLckResourceUnlock(SaLckLoc goto done; } + /* are we a member of the cluster? */ + if (m_GLA_VER_IS_AT_LEAST_B_3(client_info->version)) { + if (!gla_cb->isClusterMember || client_info->isStale) { + rc = SA_AIS_ERR_UNAVAILABLE; + goto done; + } + } + /* check whether GLND is up or not */ if (!gla_cb->glnd_svc_up) { ncshm_give_hdl(lockId); @@ -1434,6 +1514,14 @@ SaAisErrorT saLckResourceUnlockAsync(SaI goto done; } + /* are we a member of the cluster? */ + if (m_GLA_VER_IS_AT_LEAST_B_3(client_info->version)) { + if (!gla_cb->isClusterMember || client_info->isStale) { + rc = SA_AIS_ERR_UNAVAILABLE; + goto done; + } + } + /* check whether GLND is up or not */ if (!gla_cb->glnd_svc_up) { ncshm_give_hdl(lockId); @@ -1541,6 +1629,14 @@ SaAisErrorT saLckLockPurge(SaLckResource goto done; } + /* are we a member of the cluster? 
*/ + if (m_GLA_VER_IS_AT_LEAST_B_3(client_info->version)) { + if (!gla_cb->isClusterMember || client_info->isStale) { + rc = SA_AIS_ERR_UNAVAILABLE; + goto done; + } + } + /* check whether GLND is up or not */ if (!gla_cb->glnd_svc_up) { ncshm_give_hdl(lockResourceHandle); diff --git a/src/lck/agent/gla_cb.h b/src/lck/agent/gla_cb.h --- a/src/lck/agent/gla_cb.h +++ b/src/lck/agent/gla_cb.h @@ -38,6 +38,8 @@ typedef struct gla_client_info_tag { uint32_t client_context_id; SaLckCallbacksT lckCallbk; SaTimeT lcktimer; + bool isStale; + SaVersionT version; /* Mailbox Queue to store the messages for the clients */ SYSF_MBX callbk_mbx; NCS_PATRICIA_TREE client_res_tree; @@ -106,6 +108,7 @@ typedef struct gla_cb_tag { MDS_DEST glnd_mds_dest; bool glnd_svc_up; bool glnd_crashed; + bool isClusterMember; /* GLA data */ NCS_PATRICIA_TREE gla_client_tree; /* GLA_CLIENT_INFO - node */ @@ -131,6 +134,7 @@ uint32_t gla_client_tree_init(GLA_CB *cb void gla_client_tree_destroy(GLA_CB *gla_cb); void gla_client_tree_cleanup(GLA_CB *gla_cb); GLA_CLIENT_INFO *gla_client_tree_find_and_add(GLA_CB *gla_cb, SaLckHandleT hdl_id, bool flag); +GLA_CLIENT_INFO *gla_client_tree_find_next(GLA_CB *gla_cb, SaLckHandleT hdl_id); uint32_t gla_client_tree_delete_node(GLA_CB *gla_cb, GLA_CLIENT_INFO *client_info, bool give_hdl); GLA_CLIENT_RES_INFO *gla_client_res_tree_find_and_add(GLA_CLIENT_INFO *client_info, SaLckResourceIdT res_id, bool flag); diff --git a/src/lck/agent/gla_evt.h b/src/lck/agent/gla_evt.h --- a/src/lck/agent/gla_evt.h +++ b/src/lck/agent/gla_evt.h @@ -34,6 +34,7 @@ typedef enum glsv_gla_evt_type { GLSV_GLA_CALLBK_EVT, GLSV_GLA_API_RESP_EVT, + GLSV_GLA_CLM_EVT, GLSV_GLA_EVT_MAX } GLSV_GLA_EVT_TYPE; @@ -45,7 +46,9 @@ typedef enum glsv_gla_api_resp_evt_type_ GLSV_GLA_LOCK_SYNC_LOCK, GLSV_GLA_LOCK_SYNC_UNLOCK, GLSV_GLA_NODE_OPERATIONAL, - GLSV_GLA_LOCK_PURGE + GLSV_GLA_LOCK_PURGE, + GLSV_GLA_NODE_LEFT, + GLSV_GLA_NODE_JOINED } GLSV_GLA_API_RESP_EVT_TYPE; typedef struct 
glsv_gla_evt_lock_initialise_param_tag { @@ -87,6 +90,11 @@ typedef struct glsv_gla_api_resp_info { } param; } GLSV_GLA_API_RESP_INFO; +/* CLM param definitions */ +typedef struct glsv_gla_clm_info { + bool isClusterMember; +} GLSV_GLA_CLM_INFO; + /* For GLND to GLA communication */ typedef struct glsv_gla_evt { SaLckHandleT handle; @@ -95,6 +103,7 @@ typedef struct glsv_gla_evt { union { GLSV_GLA_CALLBACK_INFO gla_clbk_info; /* callbk info */ GLSV_GLA_API_RESP_INFO gla_resp_info; /* api response info */ + GLSV_GLA_CLM_INFO gla_clm_info; /* clm info */ } info; } GLSV_GLA_EVT; diff --git a/src/lck/agent/gla_init.c b/src/lck/agent/gla_init.c --- a/src/lck/agent/gla_init.c +++ b/src/lck/agent/gla_init.c @@ -456,6 +456,32 @@ GLA_CLIENT_INFO *gla_client_tree_find_an } /**************************************************************************** + Name : gla_client_tree_find_next + + Description : This routine returns the next client + + Arguments : + gla_cb : pointer to the gla control block. + hdl_id : the handle id. + + Return Values : returns the client_info node. 
+ + Notes : None +******************************************************************************/ +GLA_CLIENT_INFO *gla_client_tree_find_next(GLA_CB *gla_cb, SaLckHandleT hdl_id) +{ + GLA_CLIENT_INFO *client_info = NULL; + TRACE_ENTER(); + + /* take the cb lock */ + m_NCS_LOCK(&gla_cb->cb_lock, NCS_LOCK_READ); + client_info = (GLA_CLIENT_INFO *)ncs_patricia_tree_getnext(&gla_cb->gla_client_tree, (uint8_t *)&hdl_id); + m_NCS_UNLOCK(&gla_cb->cb_lock, NCS_LOCK_READ); + + return client_info; +} + +/**************************************************************************** Name : gla_client_info_send Description : diff --git a/src/lck/agent/gla_mds.c b/src/lck/agent/gla_mds.c --- a/src/lck/agent/gla_mds.c +++ b/src/lck/agent/gla_mds.c @@ -51,6 +51,7 @@ static uint32_t glsv_enc_initialize_evt( static uint32_t glsv_enc_reg_unreg_agent_evt(NCS_UBAID *uba, GLSV_EVT_AGENT_INFO *evt); static uint32_t glsv_gla_dec_callbk_evt(NCS_UBAID *uba, GLSV_GLA_CALLBACK_INFO *evt); static uint32_t glsv_gla_dec_api_resp_evt(NCS_UBAID *uba, GLSV_GLA_API_RESP_INFO *evt); +static uint32_t glsv_gla_dec_clm_evt(NCS_UBAID *uba, GLSV_GLA_CLM_INFO *evt); uint32_t gla_mds_get_handle(GLA_CB *cb); @@ -494,6 +495,10 @@ static uint32_t gla_mds_dec(GLA_CB *cb, glsv_gla_dec_api_resp_evt(uba, &evt->info.gla_resp_info); break; + case GLSV_GLA_CLM_EVT: + glsv_gla_dec_clm_evt(uba, &evt->info.gla_clm_info); + break; + default: goto end; } @@ -612,6 +617,20 @@ static uint32_t gla_mds_rcv(GLA_CB *cb, m_MMGR_FREE_GLA_CALLBACK_INFO(gla_callbk_info); } goto end; + } else if (evt->type == GLSV_GLA_CLM_EVT) { + cb->isClusterMember = evt->info.gla_clm_info.isClusterMember; + + if (!cb->isClusterMember) { + /* tell all clients they are stale now */ + GLA_CLIENT_INFO *client_info; + SaLckHandleT lckHandle = 0; + for (client_info = gla_client_tree_find_next(cb, lckHandle); + client_info; + client_info = gla_client_tree_find_next(cb, client_info->lock_handle_id)) { + client_info->isStale = true; + } + } + 
m_MMGR_FREE_GLA_EVT(evt); } else { if (evt) m_MMGR_FREE_GLA_EVT(evt); @@ -1415,3 +1434,39 @@ static uint32_t glsv_gla_dec_api_resp_ev TRACE_LEAVE(); return rc; } + +/**************************************************************************** + Name : glsv_gla_DEC_clm_evt + + Description : This routine decodes clm info. + + Arguments : uba , clm info. + + Return Values : NCSCC_RC_SUCCESS/NCSCC_RC_FAILURE + + Notes : None. +******************************************************************************/ +static uint32_t glsv_gla_dec_clm_evt(NCS_UBAID *uba, GLSV_GLA_CLM_INFO *evt) +{ + uint8_t *p8, local_data[20], size; + uint32_t rc = NCSCC_RC_SUCCESS; + TRACE_ENTER(); + + do { + /** decode the type of message **/ + size = (4); + p8 = ncs_dec_flatten_space(uba, local_data, size); + if (!p8) { + TRACE_2("GLA mds dec failure"); + rc = NCSCC_RC_FAILURE; + break; + } + + osaf_decode_bool(uba, &evt->isClusterMember); + + ncs_dec_skip_space(uba, size); + } while (false); + + TRACE_LEAVE(); + return rc; +} diff --git a/src/lck/common/glsv_defs.h b/src/lck/common/glsv_defs.h --- a/src/lck/common/glsv_defs.h +++ b/src/lck/common/glsv_defs.h @@ -42,13 +42,18 @@ typedef unsigned int GLSV_TIMER_ID; /* Version Constants */ #define REQUIRED_RELEASECODE 'B' -#define REQUIRED_MAJORVERSION 01 -#define REQUIRED_MINORVERSION 01 +#define REQUIRED_MAJORVERSION 3 +#define REQUIRED_MINORVERSION 1 #define m_GLA_VER_IS_VALID(ver) \ ((ver->releaseCode == REQUIRED_RELEASECODE) && \ (ver->majorVersion <= REQUIRED_MAJORVERSION)) +#define m_GLA_VER_IS_AT_LEAST_B_3(ver) \ + ((ver.releaseCode == 'B' && \ + ver.majorVersion >= 3) || \ + ver.releaseCode > 'B') + #define MSG_FRMT_VER uint32_t /*** Macro used to get the AMF version used ****/ #define m_GLSV_GET_AMF_VER(amf_ver) amf_ver.releaseCode='B'; amf_ver.majorVersion=0x01; amf_ver.minorVersion=0x01; diff --git a/src/lck/lckd/gld_api.c b/src/lck/lckd/gld_api.c --- a/src/lck/lckd/gld_api.c +++ b/src/lck/lckd/gld_api.c @@ -29,6 +29,7 @@ #include 
<string.h> #include <stdlib.h> #include "gld_imm.h" +#include "gld_clm.h" uint32_t gl_gld_hdl; @@ -40,6 +41,7 @@ enum { FD_MBCSV, FD_MBX, FD_IMM, + FD_CLM, NUM_FD }; @@ -198,6 +200,13 @@ uint32_t gld_se_lib_init(NCS_LIB_REQ_INF } else TRACE_1("AMF Health Check started"); + amf_error = gld_clm_init(gld_cb); + if (amf_error != SA_AIS_OK) { + LOG_ER("CLM Init Failed %u\n", amf_error); + res = NCSCC_RC_FAILURE; + goto end; + } + if ((res = initialize_for_assignment(gld_cb, gld_cb->ha_state)) != NCSCC_RC_SUCCESS) { LOG_ER("initialize_for_assignment FAILED %u", (unsigned) res); @@ -488,7 +497,7 @@ void gld_main_process(SYSF_MBX *mbx) SaAisErrorT error = SA_AIS_OK; GLSV_GLD_CB *gld_cb = NULL; NCS_MBCSV_ARG mbcsv_arg; - SaSelectionObjectT amf_sel_obj; + SaSelectionObjectT amf_sel_obj, clm_sel_obj; int term_fd; TRACE_ENTER(); @@ -507,6 +516,13 @@ void gld_main_process(SYSF_MBX *mbx) goto end; } + error = saClmSelectionObjectGet(gld_cb->clm_hdl, &clm_sel_obj); + + if (error != SA_AIS_OK) { + LOG_ER("CLM Selection object get error: %i", error); + goto end; + } + daemon_sigterm_install(&term_fd); /* Set up all file descriptors to listen to */ @@ -518,6 +534,8 @@ void gld_main_process(SYSF_MBX *mbx) fds[FD_MBX].events = POLLIN; fds[FD_IMM].fd = gld_cb->imm_sel_obj; fds[FD_IMM].events = POLLIN; + fds[FD_CLM].fd = clm_sel_obj; + fds[FD_CLM].events = POLLIN; while (1) { fds[FD_MBCSV].fd = gld_cb->mbcsv_sel_obj; @@ -602,6 +620,13 @@ void gld_main_process(SYSF_MBX *mbx) } } + if (fds[FD_CLM].revents & POLLIN) { + /* dispatch all the CLM pending function */ + error = saClmDispatch(gld_cb->clm_hdl, SA_DISPATCH_ALL); + if (error != SA_AIS_OK) { + LOG_ER("CLM dispatch failed: %i", error); + } + } } end: TRACE_LEAVE(); diff --git a/src/lck/lckd/gld_cb.h b/src/lck/lckd/gld_cb.h --- a/src/lck/lckd/gld_cb.h +++ b/src/lck/lckd/gld_cb.h @@ -81,7 +81,6 @@ typedef struct glsv_gld_cb_tag { uint8_t hm_poolid; /* For use with handle manager */ NCSCONTEXT task_hdl; uint32_t my_hdl; /* Handle 
manager handle */ - uint32_t clm_hdl; /* Handle manager handle */ NCS_MBCSV_HDL mbcsv_handle; NCS_MBCSV_CKPT_HDL o_ckpt_hdl; SaSelectionObjectT mbcsv_sel_obj; @@ -99,6 +98,7 @@ typedef struct glsv_gld_cb_tag { SaLckResourceIdT nxt_rsc_id; /* Next rsc id to be generated */ SaLckResourceIdT prev_rsc_id; /* Prev res_id to be used for next cold sync rsp */ + SaClmHandleT clm_hdl; SaAmfHandleT amf_hdl; /* AMF handle, obtained thru AMF init */ SaAmfHAStateT ha_state; /* present AMF HA state of the component */ diff --git a/src/lck/lckd/gld_evt.c b/src/lck/lckd/gld_evt.c --- a/src/lck/lckd/gld_evt.c +++ b/src/lck/lckd/gld_evt.c @@ -39,6 +39,7 @@ static uint32_t gld_quisced_process(GLSV static uint32_t gld_process_send_non_master_status(GLSV_GLD_CB *gld_cb, GLSV_GLD_GLND_DETAILS *node_details, uint32_t status); static uint32_t gld_process_send_non_master_info(GLSV_GLD_CB *gld_cb, GLSV_GLD_GLND_RSC_REF *glnd_rsc, GLSV_GLD_GLND_DETAILS *node_details, uint32_t status); +static uint32_t gld_clm_glnd_down(GLSV_GLD_EVT *evt); /* GLD dispatch table */ static const @@ -51,7 +52,8 @@ GLSV_GLD_EVT_HANDLER gld_evt_dispatch_tb gld_debug_dump_cb, gld_process_tmr_resource_reelection_wait_timeout, gld_process_tmr_node_restart_wait_timeout, - gld_quisced_process + gld_quisced_process, + gld_clm_glnd_down }; /**************************************************************************** @@ -604,53 +606,17 @@ static uint32_t gld_send_res_master_info static uint32_t gld_mds_glnd_down(GLSV_GLD_EVT *evt) { GLSV_GLD_CB *gld_cb = evt->gld_cb; - GLSV_GLD_GLND_DETAILS *node_details = NULL; - GLSV_GLD_RSC_INFO *rsc_info; uint32_t node_id; uint32_t rc = NCSCC_RC_FAILURE; TRACE_ENTER2("mds identification %u",gld_cb->my_dest_id ); node_id = m_NCS_NODE_ID_FROM_MDS_DEST(evt->info.glnd_mds_info.mds_dest_id); - if ((evt == GLSV_GLD_EVT_NULL) || (gld_cb == NULL)) - goto end; + evt->info.glnd_clm_info.nodeId = node_id; + evt->info.glnd_clm_info.isClusterMember = false; - memcpy(&evt->fr_dest_id, 
&evt->info.glnd_mds_info.mds_dest_id, sizeof(MDS_DEST) - ); + gld_clm_glnd_down(evt); - if ((node_details = (GLSV_GLD_GLND_DETAILS *)ncs_patricia_tree_get(&gld_cb->glnd_details, - (uint8_t *)&node_id)) == NULL) { - TRACE_1("Resource details is empty for glnd on node_id %u ", node_id); - rc = NCSCC_RC_SUCCESS; - goto end; - } - node_details->status = GLND_RESTART_STATE; - - TRACE("EVT Processing MDS GLND DOWN: node_id %u", node_details->node_id); - memcpy(&node_details->restart_timer.mdest_id, &node_details->dest_id, sizeof(MDS_DEST)); - - /* Start GLSV_GLD_GLND_RESTART_TIMEOUT timer */ - gld_start_tmr(gld_cb, &node_details->restart_timer, GLD_TMR_NODE_RESTART_TIMEOUT, GLD_NODE_RESTART_TIMEOUT, 0); - - /* Check whether this node is master for any resource, if yes send the status to all - the - non master nodes */ - if (gld_cb->ha_state == SA_AMF_HA_ACTIVE) { - /* Check whether this node is master for any resource, if yes send the status to all the non master nodes */ - rsc_info = gld_cb->rsc_info; - while (rsc_info != NULL) { - if (rsc_info->node_list) { - if (rsc_info->node_list->node_id == node_details->node_id) - gld_snd_master_status(gld_cb, rsc_info, GLND_RESOURCE_MASTER_RESTARTED); - } - rsc_info = rsc_info->next; - } - - /* If this node is non master for any resource, then send node status to the master */ - gld_process_send_non_master_status(gld_cb, node_details, GLND_RESTART_STATE); - - } - end: TRACE_LEAVE2("Return value: %u", rc); return rc; } @@ -965,3 +931,62 @@ static uint32_t gld_process_send_non_mas return res; } + +/**************************************************************************** + * Name : gld_clm_glnd_down + * + * Description : CLM indicated that a glnd has gone down + * + * Arguments : evt - Event structure + * + * Return Values : NCSCC_RC_SUCCESS/ NCSCC_RC_FAILURE + * + * Notes : None. 
+ *****************************************************************************/ +static uint32_t gld_clm_glnd_down(GLSV_GLD_EVT *evt) +{ + GLSV_GLD_CB *gld_cb = evt->gld_cb; + GLSV_GLD_GLND_DETAILS *node_details = NULL; + GLSV_GLD_RSC_INFO *rsc_info; + uint32_t node_id; + uint32_t rc = NCSCC_RC_FAILURE; + TRACE_ENTER2("mds identification %u",gld_cb->my_dest_id ); + + node_id = evt->info.glnd_clm_info.nodeId; + + if ((node_details = (GLSV_GLD_GLND_DETAILS *)ncs_patricia_tree_get(&gld_cb->glnd_details, + (uint8_t *)&node_id)) == NULL) { + TRACE_1("Resource details is empty for glnd on node_id %u ", node_id); + rc = NCSCC_RC_SUCCESS; + goto end; + } + node_details->status = GLND_RESTART_STATE; + + TRACE("EVT Processing CLM GLND DOWN: node_id %u", node_details->node_id); + memcpy(&node_details->restart_timer.mdest_id, &node_details->dest_id, sizeof(MDS_DEST)); + + /* Start GLSV_GLD_GLND_RESTART_TIMEOUT timer */ + gld_start_tmr(gld_cb, &node_details->restart_timer, GLD_TMR_NODE_RESTART_TIMEOUT, GLD_NODE_RESTART_TIMEOUT, 0); + + /* Check whether this node is master for any resource, if yes send the status to all + the + non master nodes */ + if (gld_cb->ha_state == SA_AMF_HA_ACTIVE) { + /* Check whether this node is master for any resource, if yes send the status to all the non master nodes */ + rsc_info = gld_cb->rsc_info; + while (rsc_info != NULL) { + if (rsc_info->node_list) { + if (rsc_info->node_list->node_id == node_details->node_id) + gld_snd_master_status(gld_cb, rsc_info, GLND_RESOURCE_MASTER_RESTARTED); + } + rsc_info = rsc_info->next; + } + + /* If this node is non master for any resource, then send node status to the master */ + gld_process_send_non_master_status(gld_cb, node_details, GLND_RESTART_STATE); + + } + end: + TRACE_LEAVE2("Return value: %u", rc); + return rc; +} diff --git a/src/lck/lckd/gld_evt.h b/src/lck/lckd/gld_evt.h --- a/src/lck/lckd/gld_evt.h +++ b/src/lck/lckd/gld_evt.h @@ -18,6 +18,10 @@ #ifndef LCK_LCKD_GLD_EVT_H_ #define 
LCK_LCKD_GLD_EVT_H_ +#ifdef __cplusplus +extern "C" { +#endif + /***************************************************************************** * Message Type of GLND *****************************************************************************/ @@ -36,6 +40,8 @@ typedef enum glsv_gld_evt_type { GLSV_GLD_EVT_RESTART_TIMEOUT, GLSV_GLD_EVT_QUISCED_STATE, + GLSV_GLD_EVT_GLND_DOWN_CLM, + GLSV_GLD_EVT_MAX } GLSV_GLD_EVT_TYPE; @@ -66,6 +72,11 @@ typedef struct gld_evt_tmr_tag { uint32_t opq_hdl; } GLD_EVT_TMR; +typedef struct gld_evt_node_info { + uint32_t nodeId; + bool isClusterMember; +} GLSV_GLD_GLND_NODE_INFO; + /***************************************************************************** * GLD msg data structure. *****************************************************************************/ @@ -79,6 +90,7 @@ typedef struct glsv_gld_evt_tag { GLSV_RSC_DETAILS rsc_details; GLSV_GLD_GLND_MDS_INFO glnd_mds_info; GLD_EVT_TMR tmr; + GLSV_GLD_GLND_NODE_INFO glnd_clm_info; } info; } GLSV_GLD_EVT; @@ -90,4 +102,8 @@ typedef uint32_t (*GLSV_GLD_EVT_HANDLER) void gld_evt_destroy(GLSV_GLD_EVT *evt); uint32_t gld_process_evt(GLSV_GLD_EVT *evt); +#ifdef __cplusplus +} +#endif + #endif // LCK_LCKD_GLD_EVT_H_ diff --git a/src/lck/lckd/gld_rsc.c b/src/lck/lckd/gld_rsc.c --- a/src/lck/lckd/gld_rsc.c +++ b/src/lck/lckd/gld_rsc.c @@ -334,6 +334,8 @@ void gld_snd_master_status(GLSV_GLD_CB * NCSMDS_INFO snd_mds; uint32_t res; + TRACE_ENTER(); + memset(&snd_mds, '\0', sizeof(NCSMDS_INFO)); /*TBD need to check rsc_info */ @@ -366,6 +368,8 @@ void gld_snd_master_status(GLSV_GLD_CB * LOG_ER("MDS Send failed"); return; } + + TRACE_LEAVE(); return; } @@ -391,7 +395,10 @@ void gld_rsc_rmv_node_ref(GLSV_GLD_CB *g GLSV_NODE_LIST **node_list, *free_node_list = NULL; bool chg_master = false; + TRACE_ENTER(); + if (glnd_rsc == NULL || rsc_info == NULL) { + TRACE_LEAVE(); return; } if (rsc_info->node_list->node_id == node_details->node_id) @@ -439,6 +446,7 @@ void gld_rsc_rmv_node_ref(GLSV_GLD_CB *g 
gld_snd_master_status(gld_cb, rsc_info, GLND_RESOURCE_ELECTION_IN_PROGESS); } + TRACE_LEAVE(); return; } diff --git a/src/lck/lcknd/glnd_agent.c b/src/lck/lcknd/glnd_agent.c --- a/src/lck/lcknd/glnd_agent.c +++ b/src/lck/lcknd/glnd_agent.c @@ -54,6 +54,28 @@ GLND_AGENT_INFO *glnd_agent_node_find(GL } /***************************************************************************** + PROCEDURE NAME : glnd_agent_node_find_next + + DESCRIPTION : Finds the Agent info node from the tree. + + ARGUMENTS :glnd_cb - ptr to the GLND control block + mds_handle_id - vcard id of the agent. + + RETURNS :The pointer to the agent info node + + NOTES : None +*****************************************************************************/ +GLND_AGENT_INFO *glnd_agent_node_find_next(GLND_CB *glnd_cb, MDS_DEST agent_mds_dest) +{ + GLND_AGENT_INFO *agent_info; + + /* search for the agent id */ + agent_info = (GLND_AGENT_INFO *)ncs_patricia_tree_getnext(&glnd_cb->glnd_agent_tree, (uint8_t *)&agent_mds_dest); + + return agent_info; +} + +/***************************************************************************** PROCEDURE NAME : glnd_agent_node_add DESCRIPTION : Adds the Agent node to the Agent tree. 
diff --git a/src/lck/lcknd/glnd_api.c b/src/lck/lcknd/glnd_api.c --- a/src/lck/lcknd/glnd_api.c +++ b/src/lck/lcknd/glnd_api.c @@ -37,6 +37,7 @@ enum { FD_TERM = 0, FD_AMF, FD_MBX, + FD_CLM, NUM_FD }; @@ -191,9 +192,10 @@ void glnd_main_process(SYSF_MBX *mbx) TRACE_ENTER(); SaAmfHandleT amf_hdl; + SaClmHandleT clm_hdl; - SaSelectionObjectT amf_sel_obj; - SaAisErrorT amf_error; + SaSelectionObjectT amf_sel_obj, clm_sel_obj; + SaAisErrorT ais_error; struct pollfd sel[NUM_FD]; int term_fd; @@ -206,16 +208,23 @@ void glnd_main_process(SYSF_MBX *mbx) } amf_hdl = glnd_cb->amf_hdl; + clm_hdl = glnd_cb->clm_hdl; /*giveup the handle */ m_GLND_GIVEUP_GLND_CB; - amf_error = saAmfSelectionObjectGet(amf_hdl, &amf_sel_obj); - if (amf_error != SA_AIS_OK) { + ais_error = saAmfSelectionObjectGet(amf_hdl, &amf_sel_obj); + if (ais_error != SA_AIS_OK) { LOG_ER("GLND amf get sel obj error"); goto end; } + ais_error = saClmSelectionObjectGet(clm_hdl, &clm_sel_obj); + if (ais_error != SA_AIS_OK) { + LOG_ER("GLND clm get sel obj error"); + goto end; + } + daemon_sigterm_install(&term_fd); sel[FD_TERM].fd = term_fd; @@ -224,6 +233,8 @@ void glnd_main_process(SYSF_MBX *mbx) sel[FD_AMF].events = POLLIN; sel[FD_MBX].fd = m_GET_FD_FROM_SEL_OBJ(mbx_fd); sel[FD_MBX].events = POLLIN; + sel[FD_CLM].fd = clm_sel_obj; + sel[FD_CLM].events = POLLIN; while (osaf_poll(&sel[0], NUM_FD, -1) > 0) { @@ -231,7 +242,7 @@ void glnd_main_process(SYSF_MBX *mbx) daemon_exit(); } - if (((sel[FD_AMF].revents | sel[FD_MBX].revents) & + if (((sel[FD_AMF].revents | sel[FD_MBX].revents | sel[FD_CLM].revents) & (POLLERR | POLLHUP | POLLNVAL)) != 0) { LOG_ER("GLND poll() failure: %hd %hd", sel[FD_AMF].revents, sel[FD_MBX].revents); @@ -241,8 +252,8 @@ void glnd_main_process(SYSF_MBX *mbx) /* process all the AMF messages */ if (sel[FD_AMF].revents & POLLIN) { /* dispatch all the AMF pending function */ - amf_error = saAmfDispatch(amf_hdl, SA_DISPATCH_ALL); - if (amf_error != SA_AIS_OK) { + ais_error = 
saAmfDispatch(amf_hdl, SA_DISPATCH_ALL); + if (ais_error != SA_AIS_OK) { TRACE_2("GLND amf dispatch failure"); } } @@ -256,6 +267,14 @@ void glnd_main_process(SYSF_MBX *mbx) } else break; } + /* process all the AMF messages */ + if (sel[FD_CLM].revents & POLLIN) { + /* dispatch all the CLM pending function */ + ais_error = saClmDispatch(clm_hdl, SA_DISPATCH_ALL); + if (ais_error != SA_AIS_OK) { + TRACE_2("GLND clm dispatch failure"); + } + } } TRACE("DANGER: Exiting the Select loop of GLND"); diff --git a/src/lck/lcknd/glnd_cb.c b/src/lck/lcknd/glnd_cb.c --- a/src/lck/lcknd/glnd_cb.c +++ b/src/lck/lcknd/glnd_cb.c @@ -27,6 +27,7 @@ ******************************************************************************/ #include "lck/lcknd/glnd.h" +#include "lck/lcknd/glnd_clm.h" uint32_t gl_glnd_hdl; NCSCONTEXT gl_glnd_task_hdl; @@ -113,6 +114,13 @@ GLND_CB *glnd_cb_create(uint32_t pool_id } else TRACE_1("GLND mds register success"); + /* Initialise with the CLM service */ + if (glnd_clm_init(glnd_cb) != NCSCC_RC_SUCCESS) { + LOG_ER("GLND clm init failed"); + goto clm_init_err; + } else + TRACE_1("GLND clm init success"); + /* Initialise with the AMF service */ if (glnd_amf_init(glnd_cb) != NCSCC_RC_SUCCESS) { LOG_ER("GLND amf init failed"); @@ -170,6 +178,8 @@ GLND_CB *glnd_cb_create(uint32_t pool_id glnd_amf_de_init(glnd_cb); amf_init_err: glnd_mds_unregister(glnd_cb); + clm_init_err: + glnd_clm_deinit(glnd_cb); mds_err: m_NCS_EDU_HDL_FLUSH(&glnd_cb->glnd_edu_hdl); m_NCS_IPC_DETACH(&glnd_cb->glnd_mbx, glnd_cleanup_mbx, glnd_cb); diff --git a/src/lck/lcknd/glnd_cb.h b/src/lck/lcknd/glnd_cb.h --- a/src/lck/lcknd/glnd_cb.h +++ b/src/lck/lcknd/glnd_cb.h @@ -23,6 +23,10 @@ #include "glnd_tmr.h" #include "lck/lcknd/glnd_evt.h" +#ifdef __cplusplus +extern "C" { +#endif + /* global variables */ uint32_t gl_glnd_hdl; NCSCONTEXT gl_glnd_task_hdl; @@ -177,6 +181,7 @@ typedef struct glnd_cb_tag { /* Information about the GLD */ MDS_DEST gld_mdest_id; bool gld_card_up; + bool 
isClusterMember; /* GLND data */ NCS_PATRICIA_TREE glnd_client_tree; /* GLND_CLIENT_INFO - node */ @@ -187,6 +192,7 @@ typedef struct glnd_cb_tag { GLND_RES_LOCK_LIST_INFO *non_mst_orphan_list; struct glsv_glnd_evt *evt_bckup_q; /* backup the events incase of mastership change */ + SaClmHandleT clm_hdl; /* CLM handle, obtained thru CLM init */ SaAmfHandleT amf_hdl; /* AMF handle, obtained thru AMF init */ SaAmfHAStateT ha_state; /* present AMF HA state of the component */ EDU_HDL glnd_edu_hdl; /* edu handle used for encode/decode */ @@ -198,6 +204,7 @@ typedef struct glnd_cb_tag { /* prototypes */ GLND_AGENT_INFO *glnd_agent_node_find(GLND_CB *glnd_cb, MDS_DEST agent_mds_dest); +GLND_AGENT_INFO *glnd_agent_node_find_next(GLND_CB *, MDS_DEST); GLND_AGENT_INFO *glnd_agent_node_add(GLND_CB *glnd_cb, MDS_DEST agent_mds_dest, uint32_t process_id); void glnd_agent_node_del(GLND_CB *glnd_cb, GLND_AGENT_INFO *agent_info); @@ -213,6 +220,7 @@ GLND_RESOURCE_REQ_LIST *glnd_resource_re MDS_SYNC_SND_CTXT *mds_ctxt, SaLckResourceIdT lcl_resource_id); GLND_RESOURCE_REQ_LIST *glnd_resource_req_node_find(GLND_CB *glnd_cb, SaNameT *resource_name); +void glnd_resource_req_node_down(GLND_CB *glnd_cb); void glnd_resource_req_node_del(GLND_CB *glnd_cb, uint32_t res_req_hdl); /* Amf prototypes */ @@ -233,4 +241,8 @@ uint8_t glnd_cpsv_initilize(GLND_CB *gln uint32_t glnd_shm_create(GLND_CB *cb); uint32_t glnd_shm_destroy(GLND_CB *cb, char shm_name[]); +#ifdef __cplusplus +} +#endif + #endif // LCK_LCKND_GLND_CB_H_ diff --git a/src/lck/lcknd/glnd_ckpt.c b/src/lck/lcknd/glnd_ckpt.c --- a/src/lck/lcknd/glnd_ckpt.c +++ b/src/lck/lcknd/glnd_ckpt.c @@ -81,6 +81,18 @@ uint32_t glnd_restart_resource_info_ckpt restart_resource_info.pr_orphaned = res_info->lck_master_info.pr_orphaned; restart_resource_info.ex_orphaned = res_info->lck_master_info.ex_orphaned; + TRACE("resource_id: %i lcl_ref_cnt: %i status: %i master_status: %i " + "pr_orphan_req_count: %i ex_orphan_req_count: %i pr_orphaned: %i " + 
"ex_orphaned: %i", + restart_resource_info.resource_id, + restart_resource_info.lcl_ref_cnt, + restart_resource_info.status, + restart_resource_info.master_status, + restart_resource_info.pr_orphan_req_count, + restart_resource_info.ex_orphan_req_count, + restart_resource_info.pr_orphaned, + restart_resource_info.ex_orphaned); + /* Find valid sections to write res info in the shared memory */ glnd_find_res_shm_ckpt_empty_section(glnd_cb, &shm_index); restart_resource_info.shm_index = res_info->shm_index = shm_index; @@ -220,6 +232,7 @@ uint32_t glnd_restart_res_lock_list_ckpt NCS_OS_POSIX_SHM_REQ_INFO lck_list_info_write; uint32_t rc = NCSCC_RC_SUCCESS; uint32_t shm_index; + int i; TRACE_ENTER2("Resource id: %u", res_id); memset(&restart_res_lock_list_info, 0, sizeof(GLND_RESTART_RES_LOCK_LIST_INFO)); @@ -237,6 +250,21 @@ uint32_t glnd_restart_res_lock_list_ckpt restart_res_lock_list_info.to_which_list = to_which_list; restart_res_lock_list_info.non_master_status = res_lock_list->non_master_status; + TRACE("lcl_resource_id: %u lck_info_hdl_id: %u " + "unlock_req_sent: %i unlock_call_type: %i to_which_list: %i " + "non_master_status: %i lockid: %llu lcl_lockid: %llu", + restart_res_lock_list_info.lcl_resource_id, + restart_res_lock_list_info.lck_info_hdl_id, + restart_res_lock_list_info.unlock_req_sent, + restart_res_lock_list_info.unlock_call_type, + restart_res_lock_list_info.to_which_list, + restart_res_lock_list_info.non_master_status, + restart_res_lock_list_info.lock_info.lockid, + restart_res_lock_list_info.lock_info.lcl_lockid); + + for (i = 0; i < restart_res_lock_list_info.glnd_res_lock_mds_ctxt.length; i++) + TRACE("glnd_res_lock_mds_ctxt[%i]: %u", i, restart_res_lock_list_info.glnd_res_lock_mds_ctxt.data[i]); + /* Find valid sections to write res info in the shared memory */ glnd_find_lck_shm_ckpt_empty_section(glnd_cb, &shm_index); restart_res_lock_list_info.shm_index = res_lock_list->shm_index = shm_index; @@ -281,6 +309,7 @@ uint32_t 
glnd_restart_res_lock_list_ckpt NCS_OS_POSIX_SHM_REQ_INFO lck_list_info_write; GLND_RESTART_RES_LOCK_LIST_INFO *shm_base_addr = NULL; uint32_t rc = NCSCC_RC_SUCCESS; + int i; TRACE_ENTER2("resource_id %u", res_id); shm_base_addr = glnd_cb->glnd_lck_shm_base_addr; @@ -299,6 +328,9 @@ uint32_t glnd_restart_res_lock_list_ckpt restart_res_lock_list_info.shm_index = res_lock_list->shm_index; restart_res_lock_list_info.valid = GLND_SHM_INFO_VALID; + for (i = 0; i < restart_res_lock_list_info.glnd_res_lock_mds_ctxt.length; i++) + TRACE("glnd_res_lock_mds_ctxt[%i]: %u", i, restart_res_lock_list_info.glnd_res_lock_mds_ctxt.data[i]); + memset((shm_base_addr + res_lock_list->shm_index), '\0', sizeof(GLND_RESTART_RES_LOCK_LIST_INFO)); /* Fill the POSIX shared memory req info */ diff --git a/src/lck/lcknd/glnd_client.c b/src/lck/lcknd/glnd_client.c --- a/src/lck/lcknd/glnd_client.c +++ b/src/lck/lcknd/glnd_client.c @@ -125,6 +125,93 @@ GLND_CLIENT_INFO *glnd_client_node_add(G } /***************************************************************************** + PROCEDURE NAME : glnd_client_node_down + + DESCRIPTION : Sends responses to any waiting calls since the node is down. + + ARGUMENTS :glnd_cb - ptr to the GLND control block + agent_mds_dest - mds dest id for the agent. 
+ + + RETURNS :None + + NOTES : None +*****************************************************************************/ +void glnd_client_node_down(GLND_CB *glnd_cb, GLND_CLIENT_INFO *client_info) +{ + GLND_CLIENT_LIST_RESOURCE *res_list; + TRACE_ENTER(); + + for (res_list = client_info->res_list; + res_list != NULL; + res_list = res_list->next) { + GLND_CLIENT_LIST_RESOURCE_LOCK_REQ *lckList; + + for (lckList = res_list->lck_list; lckList; lckList = lckList->next) { + GLND_RES_LOCK_LIST_INFO *lckListInfo = lckList->lck_req; + + if (lckListInfo) { + GLSV_GLA_EVT gla_evt; + + // respond to any blocked unlock calls + if (lckListInfo->unlock_call_type == GLSV_SYNC_CALL) { + glnd_stop_tmr(&lckListInfo->timeout_tmr); + + gla_evt.error = SA_AIS_ERR_UNAVAILABLE; + gla_evt.handle = lckListInfo->lock_info.handleId; + gla_evt.type = GLSV_GLA_API_RESP_EVT; + gla_evt.info.gla_resp_info.type = GLSV_GLA_LOCK_SYNC_UNLOCK; + + glnd_mds_msg_send_rsp_gla(glnd_cb, + &gla_evt, + lckListInfo->lock_info.agent_mds_dest, + &lckListInfo->glnd_res_lock_mds_ctxt); + } else if (lckListInfo->unlock_call_type == GLSV_ASYNC_CALL) { + m_GLND_RESOURCE_ASYNC_LCK_UNLOCK_FILL(gla_evt, + SA_AIS_ERR_UNAVAILABLE, + lckListInfo->lock_info.invocation, + lckListInfo->lcl_resource_id, + lckListInfo->lock_info.lcl_lockid, + 0); + gla_evt.handle = lckListInfo->lock_info.handleId; + + glnd_mds_msg_send_gla(glnd_cb, &gla_evt, lckListInfo->lock_info.agent_mds_dest); + } + + // respond to any blocked lock calls + if (lckListInfo->lock_info.call_type == GLSV_SYNC_CALL) { + glnd_stop_tmr(&lckListInfo->timeout_tmr); + + gla_evt.error = SA_AIS_ERR_UNAVAILABLE; + gla_evt.handle = lckListInfo->lock_info.handleId; + gla_evt.type = GLSV_GLA_API_RESP_EVT; + gla_evt.info.gla_resp_info.type = GLSV_GLA_LOCK_SYNC_LOCK; + + glnd_mds_msg_send_rsp_gla(glnd_cb, + &gla_evt, + lckListInfo->lock_info.agent_mds_dest, + &lckListInfo->glnd_res_lock_mds_ctxt); + } else if (lckListInfo->lock_info.call_type == GLSV_ASYNC_CALL) { + 
m_GLND_RESOURCE_ASYNC_LCK_GRANT_FILL(gla_evt, + SA_AIS_ERR_UNAVAILABLE, + 0, + lckListInfo->lock_info.lcl_lockid, + lckListInfo->lock_info.lock_type, + lckListInfo->lcl_resource_id, + lckListInfo->lock_info.invocation, + 0, + lckListInfo->lock_info.handleId); + + glnd_mds_msg_send_gla(glnd_cb, &gla_evt, lckListInfo->lock_info.agent_mds_dest); + } + } + } + } + + TRACE_LEAVE(); +} + +/***************************************************************************** PROCEDURE NAME : glnd_client_node_del DESCRIPTION : Deletes the client node from the tree. @@ -277,7 +364,10 @@ uint32_t glnd_client_node_resource_del(G SaLckLockModeT lock_type = SA_LCK_PR_LOCK_MODE; bool local_orphan_lock = false; + TRACE_ENTER(); + if (!client_info) { + TRACE_LEAVE(); return NCSCC_RC_FAILURE; } @@ -289,25 +379,45 @@ uint32_t glnd_client_node_resource_del(G /* delete all the lock requests */ for (lock_req_list = resource_list->lck_list; lock_req_list != NULL;) { if (resource_list->rsc_info->status == GLND_RESOURCE_ACTIVE_NON_MASTER) { - /* send request to orphan the lock */ - m_GLND_RESOURCE_NODE_LCK_INFO_FILL(glnd_evt, GLSV_GLND_EVT_LCK_REQ_ORPHAN, - resource_list->rsc_info->resource_id, - lock_req_list->lck_req->lcl_resource_id, - lock_req_list->lck_req->lock_info.handleId, - lock_req_list->lck_req->lock_info.lockid, - lock_req_list->lck_req->lock_info.lock_type, - lock_req_list->lck_req->lock_info.lockFlags, - 0, 0, 0, 0, - lock_req_list->lck_req->lock_info.lcl_lockid, 0); - glnd_evt.info.node_lck_info.glnd_mds_dest = glnd_cb->glnd_mdest_id; - glnd_mds_msg_send_glnd(glnd_cb, &glnd_evt, res_info->master_mds_dest); - } else { - /* unset any orphan count */ - lock_type = lock_req_list->lck_req->lock_info.lock_type; - if ((lock_req_list->lck_req->lock_info.lockFlags & SA_LCK_LOCK_ORPHAN) == - SA_LCK_LOCK_ORPHAN) - local_orphan_lock = true; - } + if (lock_req_list->lck_req->lock_info.lockStatus == SA_LCK_LOCK_GRANTED) { + TRACE("lock granted: sending orphan request"); + /* send request to 
orphan the lock */ + m_GLND_RESOURCE_NODE_LCK_INFO_FILL(glnd_evt, GLSV_GLND_EVT_LCK_REQ_ORPHAN, + resource_list->rsc_info->resource_id, + lock_req_list->lck_req->lcl_resource_id, + lock_req_list->lck_req->lock_info.handleId, + lock_req_list->lck_req->lock_info.lockid, + lock_req_list->lck_req->lock_info.lock_type, + lock_req_list->lck_req->lock_info.lockFlags, + 0, 0, 0, 0, + lock_req_list->lck_req->lock_info.lcl_lockid, 0); + glnd_evt.info.node_lck_info.glnd_mds_dest = glnd_cb->glnd_mdest_id; + glnd_mds_msg_send_glnd(glnd_cb, &glnd_evt, res_info->master_mds_dest); + } else { + TRACE("lock still outstanding: sending cancel request"); + m_GLND_RESOURCE_NODE_LCK_INFO_FILL(glnd_evt, GLSV_GLND_EVT_LCK_REQ_CANCEL, + resource_list->rsc_info->resource_id, + lock_req_list->lck_req->lcl_resource_id, + lock_req_list->lck_req->lock_info.handleId, + lock_req_list->lck_req->lock_info.lockid, + lock_req_list->lck_req->lock_info.lock_type, + lock_req_list->lck_req->lock_info.lockFlags, + 0, 0, 0, 0, lock_req_list->lck_req->lock_info.lcl_lockid, 0); + + glnd_evt.info.node_lck_info.glnd_mds_dest = glnd_cb->glnd_mdest_id; + + if (res_info->status != GLND_RESOURCE_ELECTION_IN_PROGESS) + glnd_mds_msg_send_glnd(glnd_cb, &glnd_evt, res_info->master_mds_dest); + else + glnd_evt_backup_queue_add(glnd_cb, &glnd_evt); /* send it to the backup queue */ + } + } else { + /* unset any orphan count */ + lock_type = lock_req_list->lck_req->lock_info.lock_type; + if ((lock_req_list->lck_req->lock_info.lockFlags & SA_LCK_LOCK_ORPHAN) == + SA_LCK_LOCK_ORPHAN) + local_orphan_lock = true; + } del_req_list = lock_req_list; lock_req_list = lock_req_list->next; @@ -332,6 +442,7 @@ uint32_t glnd_client_node_resource_del(G } + TRACE_LEAVE(); return NCSCC_RC_SUCCESS; } @@ -361,7 +472,10 @@ uint32_t glnd_client_node_lcl_resource_d bool local_orphan_lock = false; bool resource_del = true; + TRACE_ENTER(); + if (!client_info) { + TRACE_LEAVE(); return NCSCC_RC_FAILURE; } @@ -433,6 +547,7 @@ uint32_t 
glnd_client_node_lcl_resource_d } *resource_del_flag = resource_del; + TRACE_LEAVE(); return NCSCC_RC_SUCCESS; } @@ -504,8 +619,12 @@ uint32_t glnd_client_node_resource_lock_ GLND_CLIENT_LIST_RESOURCE *res_list, GLND_CLIENT_LIST_RESOURCE_LOCK_REQ *lock_req_list) { - if (client_info == NULL || lock_req_list == NULL) + TRACE_ENTER(); + + if (client_info == NULL || lock_req_list == NULL) { + TRACE_LEAVE(); return NCSCC_RC_FAILURE; + } if (res_list->lck_list == lock_req_list) res_list->lck_list = lock_req_list->next; @@ -515,6 +634,7 @@ uint32_t glnd_client_node_resource_lock_ lock_req_list->next->prev = lock_req_list->prev; m_MMGR_FREE_GLND_CLIENT_RES_LOCK_LIST_REQ(lock_req_list); + TRACE_LEAVE(); return NCSCC_RC_SUCCESS; } @@ -574,6 +694,7 @@ uint32_t glnd_client_node_resource_lock_ { GLND_CLIENT_LIST_RESOURCE_LOCK_REQ *lck_req_list; GLND_CLIENT_LIST_RESOURCE *resource_list; + TRACE_ENTER(); if (!client_info) return NCSCC_RC_FAILURE; @@ -591,9 +712,12 @@ uint32_t glnd_client_node_resource_lock_ resource_list->lck_list = lck_req_list->next; } glnd_client_node_resource_lock_req_del(client_info, resource_list, lck_req_list); + TRACE_LEAVE(); return NCSCC_RC_SUCCESS; } } + + TRACE_LEAVE(); return NCSCC_RC_FAILURE; } @@ -617,6 +741,8 @@ uint32_t glnd_client_node_resource_lock_ GLND_CLIENT_LIST_RESOURCE_LOCK_REQ *lck_req_list; GLND_CLIENT_LIST_RESOURCE *resource_list; + TRACE_ENTER(); + if (!client_info) return NCSCC_RC_FAILURE; @@ -625,13 +751,20 @@ uint32_t glnd_client_node_resource_lock_ if (resource_list) { for (lck_req_list = resource_list->lck_list; lck_req_list != NULL; lck_req_list = lck_req_list->next) { + TRACE("lockType: %i lcl_resource_id: %i lockStatus: %i", + lck_req_list->lck_req->lock_info.lock_type, + lck_req_list->lck_req->lcl_resource_id, + lck_req_list->lck_req->lock_info.lockStatus); if (lck_req_list->lck_req->lock_info.lock_type == SA_LCK_EX_LOCK_MODE && lck_req_list->lck_req->lcl_resource_id == lcl_res_id - && 
lck_req_list->lck_req->lock_info.lockStatus == SA_LCK_LOCK_GRANTED) + && lck_req_list->lck_req->lock_info.lockStatus == SA_LCK_LOCK_GRANTED) { + TRACE_LEAVE(); return NCSCC_RC_FAILURE; + } } } + TRACE_LEAVE(); return NCSCC_RC_SUCCESS; } diff --git a/src/lck/lcknd/glnd_client.h b/src/lck/lcknd/glnd_client.h --- a/src/lck/lcknd/glnd_client.h +++ b/src/lck/lcknd/glnd_client.h @@ -18,6 +18,10 @@ #ifndef LCK_LCKND_GLND_CLIENT_H_ #define LCK_LCKND_GLND_CLIENT_H_ +#ifdef __cplusplus +extern "C" { +#endif + typedef struct glnd_client_list_resource_lock_req_tag { GLND_RES_LOCK_LIST_INFO *lck_req; struct glnd_client_list_resource_lock_req_tag *prev, *next; @@ -48,6 +52,8 @@ GLND_CLIENT_INFO *glnd_client_node_find_ GLND_CLIENT_INFO *glnd_client_node_add(GLND_CB *glnd_cb, MDS_DEST agent_mds_dest, SaLckHandleT app_handle_id); +void glnd_client_node_down(GLND_CB *glnd_cb, GLND_CLIENT_INFO *client_info); + uint32_t glnd_client_node_del(GLND_CB *glnd_cb, GLND_CLIENT_INFO *client_info); uint32_t glnd_client_node_resource_add(GLND_CLIENT_INFO *client_info, struct glnd_resource_info_tag *res_info); @@ -79,4 +85,8 @@ uint32_t glnd_client_node_resource_lock_ uint32_t glnd_client_node_resource_lock_find_duplicate_ex(GLND_CLIENT_INFO *client_info, SaLckResourceIdT res_id, SaLckResourceIdT lcl_res_id); +#ifdef __cplusplus +} +#endif + #endif // LCK_LCKND_GLND_CLIENT_H_ diff --git a/src/lck/lcknd/glnd_evt.c b/src/lck/lcknd/glnd_evt.c --- a/src/lck/lcknd/glnd_evt.c +++ b/src/lck/lcknd/glnd_evt.c @@ -64,6 +64,7 @@ ******************************************************************************/ #include "lck/lcknd/glnd.h" +#include "lck/common/glsv_defs.h" /******************************************************************************/ @@ -455,6 +456,20 @@ static uint32_t glnd_process_gla_client_ memset(&gla_evt, 0, sizeof(GLSV_GLA_EVT)); gla_evt.type = GLSV_GLA_API_RESP_EVT; + if (m_GLA_VER_IS_AT_LEAST_B_3(client_info->version) && + !glnd_cb->isClusterMember) { + TRACE_2("gla client 
initialize failed, node is not cluster member"); + /* initialise the gla_evt */ + gla_evt.error = SA_AIS_ERR_UNAVAILABLE; + gla_evt.type = GLSV_GLA_API_RESP_EVT; + gla_evt.info.gla_resp_info.type = GLSV_GLA_LOCK_INITIALIZE; + + /* send the evt */ + glnd_mds_msg_send_rsp_gla(glnd_cb, &gla_evt, client_info->agent_mds_dest, &evt->mds_context); + + goto end; + } + if (glnd_cb->node_state != GLND_OPERATIONAL_STATE || glnd_cb->gld_card_up != true) { TRACE_2("gla client initialize failed, glnd state %d", glnd_cb->node_state); /* initialise the gla_evt */ @@ -531,20 +546,6 @@ static uint32_t glnd_process_gla_client_ memset(&gla_evt, 0, sizeof(GLSV_GLA_EVT)); gla_evt.type = GLSV_GLA_API_RESP_EVT; - if (glnd_cb->node_state != GLND_OPERATIONAL_STATE || glnd_cb->gld_card_up != true) { - TRACE_2("gla client finalize failed, glnd state %d", glnd_cb->node_state); - /* initialise the gla_evt */ - gla_evt.error = SA_AIS_ERR_TRY_AGAIN; - gla_evt.handle = finalize_info->handle_id; - gla_evt.type = GLSV_GLA_API_RESP_EVT; - gla_evt.info.gla_resp_info.type = GLSV_GLA_LOCK_FINALIZE; - - /* send the evt */ - glnd_mds_msg_send_rsp_gla(glnd_cb, &gla_evt, finalize_info->agent_mds_dest, &evt->mds_context); - - goto end; - } - /* verify that the agent exists */ if ((del_node = glnd_client_node_find(glnd_cb, finalize_info->handle_id))) { /* delete the client from the tree */ @@ -585,6 +586,7 @@ end: static uint32_t glnd_process_gla_resource_open(GLND_CB *glnd_cb, GLSV_GLND_EVT *evt) { GLSV_EVT_RSC_INFO *rsc_info; + GLND_CLIENT_INFO *client_info; GLND_RESOURCE_INFO *resource_node; GLND_RESOURCE_REQ_LIST *res_req_node; GLSV_GLA_EVT gla_evt; @@ -594,6 +596,53 @@ static uint32_t glnd_process_gla_resourc rsc_info = (GLSV_EVT_RSC_INFO *)&evt->info.rsc_info; + /* get the client handle */ + client_info = glnd_client_node_find(glnd_cb, rsc_info->client_handle_id); + if (!client_info) { + /* initialise the gla_evt */ + memset(&gla_evt, 0, sizeof(GLSV_GLA_EVT)); + + gla_evt.error = SA_AIS_ERR_BAD_HANDLE; 
+ gla_evt.handle = rsc_info->client_handle_id; + gla_evt.type = GLSV_GLA_API_RESP_EVT; + gla_evt.info.gla_resp_info.type = GLSV_GLA_LOCK_RES_CLOSE; + glnd_mds_msg_send_rsp_gla(glnd_cb, &gla_evt, rsc_info->agent_mds_dest, &evt->mds_context); + + TRACE_2("GLND Client node find failed"); + rc = NCSCC_RC_FAILURE; + goto end; + } + + if (m_GLA_VER_IS_AT_LEAST_B_3(client_info->version) && + !glnd_cb->isClusterMember) { + TRACE_2("resource open failed, node is not cluster member"); + memset(&gla_evt, 0, sizeof(GLSV_GLA_EVT)); + gla_evt.handle = rsc_info->client_handle_id; + + if (rsc_info->call_type == GLSV_SYNC_CALL) { + /* initialise the gla_evt */ + gla_evt.error = SA_AIS_ERR_UNAVAILABLE; + gla_evt.type = GLSV_GLA_API_RESP_EVT; + gla_evt.info.gla_resp_info.type = GLSV_GLA_LOCK_RES_OPEN; + + /* send the evt */ + glnd_mds_msg_send_rsp_gla(glnd_cb, &gla_evt, rsc_info->agent_mds_dest, &evt->mds_context); + + goto end; + } else { + gla_evt.type = GLSV_GLA_CALLBK_EVT; + gla_evt.info.gla_clbk_info.callback_type = GLSV_LOCK_RES_OPEN_CBK; + gla_evt.info.gla_clbk_info.resourceId = rsc_info->lcl_resource_id; + gla_evt.info.gla_clbk_info.params.res_open.resourceId = 0; + gla_evt.info.gla_clbk_info.params.res_open.invocation = rsc_info->invocation; + gla_evt.info.gla_clbk_info.params.res_open.error = SA_AIS_ERR_UNAVAILABLE; + /* send the evt */ + glnd_mds_msg_send_gla(glnd_cb, &gla_evt, rsc_info->agent_mds_dest); + + goto end; + } + } + if (glnd_cb->node_state != GLND_OPERATIONAL_STATE || glnd_cb->gld_card_up != true) { TRACE_2("resource open failed, glnd state %d", glnd_cb->node_state); memset(&gla_evt, 0, sizeof(GLSV_GLA_EVT)); @@ -790,6 +839,37 @@ static uint32_t glnd_process_gla_resourc rsc_info = (GLSV_EVT_RSC_INFO *)&evt->info.rsc_info; + /* get the client handle */ + client_info = glnd_client_node_find(glnd_cb, rsc_info->client_handle_id); + if (!client_info) { + /* initialise the gla_evt */ + memset(&gla_evt, 0, sizeof(GLSV_GLA_EVT)); + + gla_evt.error = 
SA_AIS_ERR_BAD_HANDLE; + gla_evt.handle = rsc_info->client_handle_id; + gla_evt.type = GLSV_GLA_API_RESP_EVT; + gla_evt.info.gla_resp_info.type = GLSV_GLA_LOCK_RES_CLOSE; + glnd_mds_msg_send_rsp_gla(glnd_cb, &gla_evt, rsc_info->agent_mds_dest, &evt->mds_context); + + TRACE_2("GLND Client node find failed"); + rc = NCSCC_RC_FAILURE; + goto end; + } + + if (m_GLA_VER_IS_AT_LEAST_B_3(client_info->version) && + !glnd_cb->isClusterMember) { + TRACE_2("resource close failed, node is not cluster member"); + /* initialise the gla_evt */ + gla_evt.error = SA_AIS_ERR_UNAVAILABLE; + gla_evt.handle = rsc_info->client_handle_id; + gla_evt.type = GLSV_GLA_API_RESP_EVT; + gla_evt.info.gla_resp_info.type = GLSV_GLA_LOCK_RES_CLOSE; + + glnd_mds_msg_send_rsp_gla(glnd_cb, &gla_evt, rsc_info->agent_mds_dest, &evt->mds_context); + + goto end; + } + if (glnd_cb->node_state != GLND_OPERATIONAL_STATE || glnd_cb->gld_card_up != true) { TRACE_2("resource close failed, glnd state %d", glnd_cb->node_state); /* initialise the gla_evt */ @@ -803,23 +883,6 @@ static uint32_t glnd_process_gla_resourc goto end; } - /* get the client handle */ - client_info = glnd_client_node_find(glnd_cb, rsc_info->client_handle_id); - if (!client_info) { - /* initialise the gla_evt */ - memset(&gla_evt, 0, sizeof(GLSV_GLA_EVT)); - - gla_evt.error = SA_AIS_ERR_BAD_HANDLE; - gla_evt.handle = rsc_info->client_handle_id; - gla_evt.type = GLSV_GLA_API_RESP_EVT; - gla_evt.info.gla_resp_info.type = GLSV_GLA_LOCK_RES_CLOSE; - glnd_mds_msg_send_rsp_gla(glnd_cb, &gla_evt, rsc_info->agent_mds_dest, &evt->mds_context); - - TRACE_2("GLND Client node find failed"); - rc = NCSCC_RC_FAILURE; - goto end; - } - memset(&gla_evt, 0, sizeof(GLSV_GLA_EVT)); /* find the resource node */ @@ -885,17 +948,23 @@ end: static uint32_t glnd_process_check_master_and_non_master_status(GLND_CB *glnd_cb, GLND_RESOURCE_INFO *res_info) { GLND_RES_LOCK_LIST_INFO *lock_list_info = NULL; + TRACE_ENTER(); if (glnd_cb->node_state != 
GLND_OPERATIONAL_STATE || glnd_cb->gld_card_up != true - || res_info->master_status != GLND_OPERATIONAL_STATE) + || res_info->master_status != GLND_OPERATIONAL_STATE) { + TRACE_LEAVE2("node not operational"); return SA_AIS_ERR_TRY_AGAIN; + } for (lock_list_info = res_info->lck_master_info.grant_list; lock_list_info != NULL; lock_list_info = lock_list_info->next) { if (lock_list_info->lock_info.lock_type == SA_LCK_EX_LOCK_MODE - && lock_list_info->non_master_status == GLND_RESTART_STATE) + && lock_list_info->non_master_status == GLND_RESTART_STATE) { + TRACE_LEAVE2("non-master status in restart"); return SA_AIS_ERR_TRY_AGAIN; + } } + TRACE_LEAVE(); return SA_AIS_OK; } @@ -928,6 +997,43 @@ static uint32_t glnd_process_gla_resourc rsc_lock_info = (GLSV_EVT_RSC_LOCK_INFO *)&evt->info.rsc_lock_info; + /* get the client handle */ + client_info = glnd_client_node_find(glnd_cb, rsc_lock_info->client_handle_id); + if (!client_info) { + LOG_ER("GLND Client node find failed"); + rc = NCSCC_RC_FAILURE; + goto end; + } + + if (m_GLA_VER_IS_AT_LEAST_B_3(client_info->version) && + !glnd_cb->isClusterMember) { + TRACE_2("resource lock failed, node is not cluster member"); + if (rsc_lock_info->call_type == GLSV_SYNC_CALL) { + gla_evt.error = SA_AIS_ERR_UNAVAILABLE; + gla_evt.handle = rsc_lock_info->client_handle_id; + gla_evt.type = GLSV_GLA_API_RESP_EVT; + gla_evt.info.gla_resp_info.type = GLSV_GLA_LOCK_SYNC_LOCK; + + glnd_mds_msg_send_rsp_gla(glnd_cb, &gla_evt, rsc_lock_info->agent_mds_dest, &evt->mds_context); + + rc = NCSCC_RC_FAILURE; + goto end; + } else { + m_GLND_RESOURCE_ASYNC_LCK_GRANT_FILL(gla_evt, SA_AIS_ERR_UNAVAILABLE, + 0, + rsc_lock_info->lcl_lockid, + rsc_lock_info->lock_type, + rsc_lock_info->lcl_resource_id, + rsc_lock_info->invocation, + 0, rsc_lock_info->client_handle_id); + + /* send the evt to GLA */ + glnd_mds_msg_send_gla(glnd_cb, &gla_evt, rsc_lock_info->agent_mds_dest); + rc = NCSCC_RC_FAILURE; + goto end; + } + } + /* check for the resource node */ 
res_node = glnd_resource_node_find(glnd_cb, rsc_lock_info->resource_id); if (!res_node) { @@ -969,14 +1075,6 @@ static uint32_t glnd_process_gla_resourc } - /* get the client handle */ - client_info = glnd_client_node_find(glnd_cb, rsc_lock_info->client_handle_id); - if (!client_info) { - LOG_ER("GLND Client node find failed"); - rc = NCSCC_RC_FAILURE; - goto end; - } - gla_evt.handle = rsc_lock_info->client_handle_id; if (res_node->status == GLND_RESOURCE_NOT_INITIALISED) { @@ -1041,9 +1139,11 @@ static uint32_t glnd_process_gla_resourc } /* add this resource lock to the client */ if (lck_list_info->lock_info.lockStatus != SA_LCK_LOCK_NOT_QUEUED - && lck_list_info->lock_info.lockStatus != SA_LCK_LOCK_ORPHANED) + && lck_list_info->lock_info.lockStatus != SA_LCK_LOCK_ORPHANED) { + lck_list_info->glnd_res_lock_mds_ctxt = evt->mds_context; glnd_restart_res_lock_list_ckpt_write(glnd_cb, lck_list_info, lck_list_info->res_info->resource_id, 0, 2); + } switch (lck_list_info->lock_info.lockStatus) { case SA_LCK_LOCK_GRANTED: @@ -1158,6 +1258,38 @@ static uint32_t glnd_process_gla_resourc rsc_unlock_info = (GLSV_EVT_RSC_UNLOCK_INFO *)&evt->info.rsc_unlock_info; memset(&lck_info, 0, sizeof(GLSV_LOCK_REQ_INFO)); + /* get the client handle */ + client_info = glnd_client_node_find(glnd_cb, rsc_unlock_info->client_handle_id); + if (!client_info) { + LOG_ER("GLND Client node find failed"); + goto end; + } + + if (m_GLA_VER_IS_AT_LEAST_B_3(client_info->version) && + !glnd_cb->isClusterMember) { + TRACE_2("resource unlock failed, node is not cluster member"); + if (rsc_unlock_info->call_type == GLSV_SYNC_CALL) { + gla_evt.error = SA_AIS_ERR_UNAVAILABLE; + gla_evt.handle = rsc_unlock_info->client_handle_id; + gla_evt.type = GLSV_GLA_API_RESP_EVT; + gla_evt.info.gla_resp_info.type = GLSV_GLA_LOCK_SYNC_UNLOCK; + + glnd_mds_msg_send_rsp_gla(glnd_cb, &gla_evt, rsc_unlock_info->agent_mds_dest, &evt->mds_context); + + goto end; + } else { + m_GLND_RESOURCE_ASYNC_LCK_UNLOCK_FILL(gla_evt, 
SA_AIS_ERR_UNAVAILABLE, + rsc_unlock_info->invocation, + rsc_unlock_info->lcl_resource_id, + rsc_unlock_info->lcl_lockid, 0); + gla_evt.handle = rsc_unlock_info->client_handle_id; + + /* send the evt to GLA */ + glnd_mds_msg_send_gla(glnd_cb, &gla_evt, rsc_unlock_info->agent_mds_dest); + goto end; + } + } + /* get the resource node */ res_node = glnd_resource_node_find(glnd_cb, rsc_unlock_info->resource_id); if (!res_node) { @@ -1196,13 +1328,6 @@ static uint32_t glnd_process_gla_resourc } } - /* get the client handle */ - client_info = glnd_client_node_find(glnd_cb, rsc_unlock_info->client_handle_id); - if (!client_info) { - LOG_ER("GLND Client node find failed"); - goto end; - } - if (res_node->status == GLND_RESOURCE_NOT_INITIALISED) { /* sleep for relection time and resend the event */ uint32_t tm = GLSV_GLND_MASTER_REELECTION_WAIT_TIME / 10000000; @@ -1375,6 +1500,7 @@ static uint32_t glnd_process_gla_resourc { GLSV_EVT_RSC_INFO *rsc_info; GLND_RESOURCE_INFO *res_node = NULL; + GLND_CLIENT_INFO *client_info; rsc_info = (GLSV_EVT_RSC_INFO *)&evt->info.rsc_info; GLSV_GLA_EVT gla_evt; SaAisErrorT error = SA_AIS_ERR_LIBRARY; @@ -1384,6 +1510,38 @@ static uint32_t glnd_process_gla_resourc memset(&gla_evt, 0, sizeof(GLSV_GLA_EVT)); gla_evt.type = GLSV_GLA_API_RESP_EVT; + /* get the client handle */ + client_info = glnd_client_node_find(glnd_cb, rsc_info->client_handle_id); + if (!client_info) { + /* initialise the gla_evt */ + memset(&gla_evt, 0, sizeof(GLSV_GLA_EVT)); + + gla_evt.error = SA_AIS_ERR_BAD_HANDLE; + gla_evt.handle = rsc_info->client_handle_id; + gla_evt.type = GLSV_GLA_API_RESP_EVT; + gla_evt.info.gla_resp_info.type = GLSV_GLA_LOCK_RES_CLOSE; + glnd_mds_msg_send_rsp_gla(glnd_cb, &gla_evt, rsc_info->agent_mds_dest, &evt->mds_context); + + TRACE_2("GLND Client node find failed"); + rc = NCSCC_RC_FAILURE; + goto end; + } + + if (m_GLA_VER_IS_AT_LEAST_B_3(client_info->version) && + !glnd_cb->isClusterMember) { + TRACE_2("resource purge failed, node is not 
cluster member"); + /* initialise the gla_evt */ + gla_evt.error = SA_AIS_ERR_UNAVAILABLE; + gla_evt.handle = rsc_info->client_handle_id; + gla_evt.type = GLSV_GLA_API_RESP_EVT; + gla_evt.info.gla_resp_info.type = GLSV_GLA_LOCK_INITIALIZE; + + /* send the evt */ + glnd_mds_msg_send_rsp_gla(glnd_cb, &gla_evt, rsc_info->agent_mds_dest, &evt->mds_context); + + goto end; + } + /* get the resource node */ res_node = glnd_resource_node_find(glnd_cb, rsc_info->resource_id); if (!res_node) { @@ -1525,9 +1683,11 @@ static uint32_t glnd_process_glnd_lck_re if (lck_list_info) { /* Checkpointing if the lock is granted or in waitlist */ if (lck_list_info->lock_info.lockStatus != SA_LCK_LOCK_NOT_QUEUED - && lck_list_info->lock_info.lockStatus != SA_LCK_LOCK_ORPHANED) + && lck_list_info->lock_info.lockStatus != SA_LCK_LOCK_ORPHANED) { + lck_list_info->glnd_res_lock_mds_ctxt = evt->mds_context; glnd_restart_res_lock_list_ckpt_write(glnd_cb, lck_list_info, lck_list_info->res_info->resource_id, 0, 2); + } switch (lck_list_info->lock_info.lockStatus) { case SA_LCK_LOCK_GRANTED: @@ -1654,7 +1814,6 @@ static uint32_t glnd_process_glnd_unlck_ /* do the re sync of the grant list */ glnd_resource_master_lock_resync_grant_list(glnd_cb, res_node); - rc = NCSCC_RC_SUCCESS; goto end; } else { @@ -2057,6 +2216,13 @@ static uint32_t glnd_process_glnd_lck_wa waiter_clbk = (GLSV_EVT_GLND_LCK_INFO *)&evt->info.node_lck_info; + // don't send the callback if we are not a cluster member + if (!glnd_cb->isClusterMember) { + TRACE("not sending waiter callback because this node is not in the cluster"); + rc = NCSCC_RC_FAILURE; + goto end; + } + res_node = glnd_resource_node_find(glnd_cb, waiter_clbk->resource_id); if (!res_node) { LOG_ER("GLND Rsc node find failed"); diff --git a/src/lck/lcknd/glnd_mds.c b/src/lck/lcknd/glnd_mds.c --- a/src/lck/lcknd/glnd_mds.c +++ b/src/lck/lcknd/glnd_mds.c @@ -56,6 +56,7 @@ static uint32_t glsv_dec_reg_unreg_agent static uint32_t glsv_dec_client_info_evt(NCS_UBAID 
*uba, GLSV_EVT_RESTART_CLIENT_INFO *evt); static uint32_t glsv_gla_enc_callbk_evt(NCS_UBAID *uba, GLSV_GLA_CALLBACK_INFO *evt); static uint32_t glsv_gla_enc_api_resp_evt(NCS_UBAID *uba, GLSV_GLA_API_RESP_INFO *evt); +static uint32_t glsv_gla_enc_clm_evt(NCS_UBAID *uba, GLSV_GLA_CLM_INFO *evt); uint32_t glnd_mds_get_handle(GLND_CB *cb); @@ -358,6 +359,10 @@ static uint32_t glnd_mds_enc(GLND_CB *cb rc = glsv_gla_enc_api_resp_evt(uba, &evt->info.gla_resp_info); break; + case GLSV_GLA_CLM_EVT: + rc = glsv_gla_enc_clm_evt(uba, &evt->info.gla_clm_info); + break; + default: goto end; } @@ -1560,3 +1565,37 @@ static uint32_t glsv_gla_enc_api_resp_ev return NCSCC_RC_SUCCESS; } + +/**************************************************************************** + Name : glsv_gla_enc_clm_evt + + Description : This routine encodes the CLM cluster membership info. + + Arguments : uba , CLM membership info. + + Return Values : NCSCC_RC_SUCCESS/NCSCC_RC_FAILURE + + Notes : None. +******************************************************************************/ +static uint32_t glsv_gla_enc_clm_evt(NCS_UBAID *uba, GLSV_GLA_CLM_INFO *evt) +{ + uint8_t *p8, size; + uint32_t rc = NCSCC_RC_SUCCESS; + + do { + size = (4); + /** encode the type of message **/ + p8 = ncs_enc_reserve_space(uba, size); + if (!p8) { + TRACE_2("GLND enc failed"); + rc = NCSCC_RC_FAILURE; + break; + } + + osaf_encode_bool(uba, evt->isClusterMember); + + ncs_enc_claim_space(uba, size); + } while (false); + + return rc; +} diff --git a/src/lck/lcknd/glnd_mds.h b/src/lck/lcknd/glnd_mds.h --- a/src/lck/lcknd/glnd_mds.h +++ b/src/lck/lcknd/glnd_mds.h @@ -30,6 +30,10 @@ #ifndef LCK_LCKND_GLND_MDS_H_ #define LCK_LCKND_GLND_MDS_H_ +#ifdef __cplusplus +extern "C" { +#endif + /*****************************************************************************/ #define SVC_SUBPART_VER uns32 @@ -163,4 +167,8 @@ do { \ (l_evt).info.gla_clbk_info.params.unlock.error = (l_err); \ } while (0); +#ifdef __cplusplus +} +#endif + #endif // 
LCK_LCKND_GLND_MDS_H_ diff --git a/src/lck/lcknd/glnd_res.c b/src/lck/lcknd/glnd_res.c --- a/src/lck/lcknd/glnd_res.c +++ b/src/lck/lcknd/glnd_res.c @@ -172,8 +172,10 @@ GLND_RESOURCE_INFO *glnd_resource_node_a uint32_t glnd_set_orphan_state(GLND_CB *glnd_cb, GLND_RESOURCE_INFO *res_info) { GLND_RES_LOCK_LIST_INFO *grant_list = NULL; + TRACE_ENTER(); if (res_info->lck_master_info.grant_list == NULL) { + TRACE_LEAVE(); return NCSCC_RC_FAILURE; } grant_list = res_info->lck_master_info.grant_list; @@ -198,6 +200,7 @@ uint32_t glnd_set_orphan_state(GLND_CB * } grant_list = grant_list->next; } + TRACE_LEAVE(); return NCSCC_RC_SUCCESS; } @@ -274,8 +277,13 @@ uint32_t glnd_resource_node_destroy(GLND GLND_RES_LOCK_LIST_INFO *lock_info = NULL; GLND_RES_LOCK_LIST_INFO *prev_lock_info = NULL; - if (res_info == NULL) + TRACE_ENTER(); + + if (res_info == NULL) { + TRACE_LEAVE(); return NCSCC_RC_FAILURE; + } + memset(&evt, 0, sizeof(GLSV_GLD_EVT)); evt.evt_type = GLSV_GLD_EVT_RSC_CLOSE; evt.info.rsc_details.rsc_id = res_info->resource_id; @@ -285,11 +293,11 @@ uint32_t glnd_resource_node_destroy(GLND if (ncs_patricia_tree_del(&glnd_cb->glnd_res_tree, &res_info->patnode) != NCSCC_RC_SUCCESS) { LOG_ER("GLND Rsc node destroy failed"); + TRACE_LEAVE(); return NCSCC_RC_FAILURE; } TRACE("GLND Resource node destroy - %d", (uint32_t)res_info->resource_id); - TRACE("GLND Rsc node destroy success: resource_id %u", (uint32_t)res_info->resource_id); for (lock_info = res_info->lck_master_info.grant_list; lock_info != NULL;) { prev_lock_info = lock_info; @@ -324,6 +332,7 @@ uint32_t glnd_resource_node_destroy(GLND glnd_res_shm_section_invalidate(glnd_cb, res_info); /* free the memory */ m_MMGR_FREE_GLND_RESOURCE_INFO(res_info); + TRACE_LEAVE(); return NCSCC_RC_SUCCESS; } @@ -492,6 +501,9 @@ GLND_RES_LOCK_LIST_INFO *glnd_resource_l void glnd_resource_lock_req_delete(GLND_RESOURCE_INFO *res_info, GLND_RES_LOCK_LIST_INFO *lck_list_info) { GLND_CB *glnd_cb; + + TRACE_ENTER(); + /* take the handle 
*/ glnd_cb = (GLND_CB *)m_GLND_TAKE_GLND_CB; if (!glnd_cb) { @@ -507,6 +519,7 @@ void glnd_resource_lock_req_delete(GLND_ /* Giveup the handle */ m_GLND_GIVEUP_GLND_CB; + TRACE_LEAVE(); return; } @@ -546,8 +559,6 @@ void glnd_resource_lock_req_destroy(GLND TRACE("GLND Resource lock req destroy res - %d lockid- %d", (uint32_t)res_info->resource_id, (uint32_t)lck_list_info->lock_info.lockid); - TRACE("GLND Rsc lock req destroy: resource_id %u, lockid %u", (uint32_t)res_info->resource_id, - (uint32_t)lck_list_info->lock_info.lockid); m_MMGR_FREE_GLND_RES_LOCK_LIST_INFO(lck_list_info); return; } @@ -1010,6 +1021,10 @@ GLND_RES_LOCK_LIST_INFO *glnd_resource_m /* set the value of the lock status */ lock_list_info->lock_info.lockStatus = GLSV_LOCK_STATUS_RELEASED; + + /* checkpoint the released lock state so it survives a GLND restart */ + glnd_restart_res_lock_list_ckpt_overwrite(cb, lock_list_info, + res_info->resource_id, 0, 2); } } done: @@ -1184,6 +1199,10 @@ static void glnd_master_process_lock_ini glnd_mds_msg_send_glnd(cb, &glnd_evt, lock_list_info->req_mdest_id); } else { + // don't send the callback if we are not a cluster member + if (!cb->isClusterMember) + return; + GLSV_GLA_EVT gla_evt; GLND_CLIENT_INFO *client_info; /* send it to the local GLA component */ @@ -1282,14 +1301,19 @@ static void glnd_resource_master_grant_l { GLSV_GLND_EVT glnd_evt; GLSV_GLA_EVT gla_evt; + int i; + TRACE_ENTER(); TRACE("LOCK_GRANTED handle - %d res - %d lockid- %d", (uint32_t)lock_list_node->lock_info.handleId, (uint32_t)res_info->resource_id, (uint32_t)lock_list_node->lock_info.lockid); - TRACE("GLND Rsc lock granted: handleId %u, resource_id %u, lockid %u", + TRACE("GLND Rsc lock granted: handleId %u, resource_id %u, lockid %u " + "lcl_lockid: %llu agent_mds_dest: %lu", + (uint32_t)lock_list_node->lock_info.handleId, (uint32_t)res_info->resource_id, - (uint32_t)lock_list_node->lock_info.lockid); + (uint32_t)lock_list_node->lock_info.lockid, + lock_list_node->lock_info.lcl_lockid, + lock_list_node->lock_info.agent_mds_dest); if 
(m_GLND_IS_LOCAL_NODE(&lock_list_node->req_mdest_id, &glnd_cb->glnd_mdest_id) == 0) { /* local master */ @@ -1302,6 +1326,9 @@ static void glnd_resource_master_grant_l lock_list_node->lock_info.lockStatus, lock_list_node->lock_info.handleId); /* send the evt to GLA */ + for (i = 0; i < lock_list_node->glnd_res_lock_mds_ctxt.length; i++) + TRACE("glnd_res_lock_mds_ctxt[%i]: %u", i, lock_list_node->glnd_res_lock_mds_ctxt.data[i]); + glnd_mds_msg_send_rsp_gla(glnd_cb, &gla_evt, lock_list_node->lock_info.agent_mds_dest, &lock_list_node->glnd_res_lock_mds_ctxt); @@ -1332,6 +1359,11 @@ static void glnd_resource_master_grant_l /* send the response evt to GLND */ glnd_mds_msg_send_glnd(glnd_cb, &glnd_evt, lock_list_node->req_mdest_id); } + + /* checkpoint the granted lock state so it survives a GLND restart */ + glnd_restart_res_lock_list_ckpt_overwrite(glnd_cb, lock_list_node, res_info->resource_id, 0, 2); + + TRACE_LEAVE(); } /***************************************************************************** @@ -1710,6 +1742,8 @@ bool glnd_deadlock_detect(GLND_CB *glnd_ GLSV_GLA_EVT gla_evt; GLSV_GLND_EVT glnd_evt; + TRACE_ENTER(); + dd_info_list = dd_probe->dd_info_list; while (dd_info_list != NULL) { if ((client_info->app_handle_id == dd_info_list->blck_hdl_id) && @@ -1797,10 +1831,14 @@ bool glnd_deadlock_detect(GLND_CB *glnd_ glnd_client_node_resource_lock_req_del(client_info, client_res_list, lck_req_info); } - if (deadlock_present || ignore_probe) + if (deadlock_present || ignore_probe) { + TRACE_LEAVE2("true"); return true; - else + } + else { + TRACE_LEAVE2("false"); return false; + } } /***************************************************************************** diff --git a/src/lck/lcknd/glnd_res_req.c b/src/lck/lcknd/glnd_res_req.c --- a/src/lck/lcknd/glnd_res_req.c +++ b/src/lck/lcknd/glnd_res_req.c @@ -160,3 +160,51 @@ void glnd_resource_req_node_del(GLND_CB } return; } + +/***************************************************************************** + PROCEDURE NAME : glnd_resource_req_node_down + + DESCRIPTION : Sends 
callback responses to outstanding requests from node + down + + ARGUMENTS :glnd_cb - ptr to the GLND control block + + + RETURNS :void + + + NOTES : Sends SA_AIS_ERR_UNAVAILABLE to every pending request. +*****************************************************************************/ +void glnd_resource_req_node_down(GLND_CB *glnd_cb) +{ + GLND_RESOURCE_REQ_LIST *res_req_info; + + for (res_req_info = glnd_cb->res_req_list; + res_req_info; + res_req_info = res_req_info->next) { + GLSV_GLA_EVT gla_evt; + + if (res_req_info->call_type == GLSV_SYNC_CALL) { + glnd_stop_tmr(&res_req_info->timeout); + + gla_evt.error = SA_AIS_ERR_UNAVAILABLE; + gla_evt.handle = res_req_info->client_handle_id; + gla_evt.type = GLSV_GLA_API_RESP_EVT; + gla_evt.info.gla_resp_info.type = GLSV_GLA_LOCK_RES_OPEN; + + glnd_mds_msg_send_rsp_gla(glnd_cb, + &gla_evt, + res_req_info->agent_mds_dest, + &res_req_info->glnd_res_mds_ctxt); + } else if (res_req_info->call_type == GLSV_ASYNC_CALL) { + gla_evt.type = GLSV_GLA_CALLBK_EVT; + gla_evt.info.gla_clbk_info.callback_type = GLSV_LOCK_RES_OPEN_CBK; + gla_evt.info.gla_clbk_info.resourceId = res_req_info->lcl_resource_id; + gla_evt.info.gla_clbk_info.params.res_open.resourceId = 0; + gla_evt.info.gla_clbk_info.params.res_open.invocation = res_req_info->invocation; + gla_evt.info.gla_clbk_info.params.res_open.error = SA_AIS_ERR_UNAVAILABLE; + + glnd_mds_msg_send_gla(glnd_cb, &gla_evt, res_req_info->agent_mds_dest); + } + } +} diff --git a/src/lck/lcknd/glnd_restart.c b/src/lck/lcknd/glnd_restart.c --- a/src/lck/lcknd/glnd_restart.c +++ b/src/lck/lcknd/glnd_restart.c @@ -149,6 +149,7 @@ static uint32_t glnd_restart_build_res_l GLND_RESTART_RES_LOCK_LIST_INFO *shm_base_addr = NULL; SaAisErrorT rc; uint32_t i; + TRACE_ENTER(); shm_base_addr = glnd_cb->glnd_lck_shm_base_addr; for (i = 0; i < GLND_RES_LOCK_INFO_CKPT_MAX_SECTIONS; i++) { @@ -164,12 +165,14 @@ static uint32_t glnd_restart_build_res_l } else
{ TRACE_2("GLND restart lck list build failure: rc %d", rc); + TRACE_LEAVE(); return NCSCC_RC_FAILURE; } } } TRACE_1("GLND restart lck list build success"); + TRACE_LEAVE(); return NCSCC_RC_SUCCESS; } @@ -190,17 +193,21 @@ static uint32_t glnd_restart_add_res_loc GLND_RESOURCE_INFO *res_info = NULL; GLND_RES_LOCK_LIST_INFO *lck_list_info = NULL; GLND_CLIENT_INFO *client_info = NULL; - uint32_t node_id; + //alex uint32_t node_id; + int i; + TRACE_ENTER(); - if (restart_res_lock_list_info == NULL) + if (restart_res_lock_list_info == NULL) { + TRACE_LEAVE(); return NCSCC_RC_FAILURE; + } lck_list_info = (GLND_RES_LOCK_LIST_INFO *)m_MMGR_ALLOC_GLND_RES_LOCK_LIST_INFO; if (lck_list_info == NULL) { LOG_CR("GLND Rsc lock list alloc failed: Error %s", strerror(errno)); assert(0); } - node_id = m_NCS_NODE_ID_FROM_MDS_DEST(restart_res_lock_list_info->req_mdest_id); + //alex node_id = m_NCS_NODE_ID_FROM_MDS_DEST(restart_res_lock_list_info->req_mdest_id); client_info = (GLND_CLIENT_INFO *)ncs_patricia_tree_get(&glnd_cb->glnd_client_tree, @@ -216,35 +223,56 @@ static uint32_t glnd_restart_add_res_loc ncshm_create_hdl((uint8_t)glnd_cb->pool_id, NCS_SERVICE_ID_GLND, (NCSCONTEXT)lck_list_info); lck_list_info->lock_info = restart_res_lock_list_info->lock_info; + /* + * alex if (node_id == m_NCS_NODE_ID_FROM_MDS_DEST(glnd_cb->glnd_mdest_id)) lck_list_info->req_mdest_id = glnd_cb->glnd_mdest_id; else + */ lck_list_info->req_mdest_id = restart_res_lock_list_info->req_mdest_id; lck_list_info->res_info = res_info; + lck_list_info->glnd_res_lock_mds_ctxt = restart_res_lock_list_info->glnd_res_lock_mds_ctxt; lck_list_info->lcl_resource_id = restart_res_lock_list_info->lcl_resource_id; lck_list_info->unlock_call_type = restart_res_lock_list_info->unlock_call_type; lck_list_info->unlock_req_sent = restart_res_lock_list_info->unlock_req_sent; lck_list_info->non_master_status = restart_res_lock_list_info->non_master_status; lck_list_info->shm_index = restart_res_lock_list_info->shm_index; + + 
TRACE("lcl_resource_id: %u unlock_call_type: %i unlock_req_sent: %i " + "non_master_status: %i lockid: %llu lcl_lock_id: %llu", + lck_list_info->lcl_resource_id, + lck_list_info->unlock_call_type, + lck_list_info->unlock_req_sent, + lck_list_info->non_master_status, + lck_list_info->lock_info.lockid, + lck_list_info->lock_info.lcl_lockid); + + for (i = 0; i < lck_list_info->glnd_res_lock_mds_ctxt.length; i++) + TRACE("glnd_res_lock_mds_ctxt[%i]: %u", i, lck_list_info->glnd_res_lock_mds_ctxt.data[i]); + /* based on which_list add the restart_res_lock_list_info->which_list add res_lock_list_info to either lock_master_info or lcl_lck_req_info of resource_info */ if (restart_res_lock_list_info->to_which_list == LCL_LOCK_REQ_LIST) { + TRACE("add to local lock req info"); /* ADD TO LCL_LOCK_REQ_INFO */ lck_list_info->next = res_info->lcl_lck_req_info; if (res_info->lcl_lck_req_info) res_info->lcl_lck_req_info->prev = lck_list_info; res_info->lcl_lck_req_info = lck_list_info; } else if (restart_res_lock_list_info->to_which_list == LOCK_MASTER_LIST) { + TRACE("add to lock master info"); /* ADD TO LOCK_MASTER_INFO */ if (lck_list_info->lock_info.lock_type == SA_LCK_EX_LOCK_MODE) { if (lck_list_info->lock_info.lockStatus == SA_LCK_LOCK_GRANTED) { if (res_info->lck_master_info.grant_list == NULL) { /* add it to the grant list */ + TRACE("add to grant list"); res_info->lck_master_info.grant_list = lck_list_info; } } else { /*Add to the wait_list */ + TRACE("add to wait list"); lck_list_info->next = res_info->lck_master_info.wait_exclusive_list; if (res_info->lck_master_info.wait_exclusive_list) res_info->lck_master_info.wait_exclusive_list->prev = lck_list_info; @@ -261,11 +289,13 @@ static uint32_t glnd_restart_add_res_loc } if (lck_list_info->lock_info.lock_type == SA_LCK_PR_LOCK_MODE) { if (lck_list_info->lock_info.lockStatus == SA_LCK_LOCK_GRANTED) { + TRACE("add to grant list"); lck_list_info->next = res_info->lck_master_info.grant_list; if 
(res_info->lck_master_info.grant_list) res_info->lck_master_info.grant_list->prev = lck_list_info; res_info->lck_master_info.grant_list = lck_list_info; } else { + TRACE("add to wait list"); /*Add to the wait_list */ /* add it to the read wait list */ lck_list_info->next = res_info->lck_master_info.wait_read_list; @@ -290,6 +320,7 @@ static uint32_t glnd_restart_add_res_loc } else m_MMGR_FREE_GLND_RES_LOCK_LIST_INFO(lck_list_info); + TRACE_LEAVE(); return NCSCC_RC_SUCCESS; } @@ -340,6 +371,18 @@ static uint32_t glnd_restart_resource_no res_info->lck_master_info.ex_orphaned = restart_res_info->ex_orphaned; res_info->shm_index = restart_res_info->shm_index; + TRACE("resource_id: %i status: %i master_status: %i lcl_ref_cnt: %i " + "pr_orphan_req_count: %i ex_orphan_req_count: %i pr_orphaned: %i " + "ex_orphaned: %i", + res_info->resource_id, + res_info->status, + res_info->master_status, + res_info->lcl_ref_cnt, + res_info->lck_master_info.pr_orphan_req_count, + res_info->lck_master_info.ex_orphan_req_count, + res_info->lck_master_info.pr_orphaned, + res_info->lck_master_info.ex_orphaned); + memcpy(&res_info->resource_name, &restart_res_info->resource_name, sizeof(SaNameT)); node_id = m_NCS_NODE_ID_FROM_MDS_DEST(restart_res_info->master_mds_dest); @@ -355,10 +398,12 @@ static uint32_t glnd_restart_resource_no if (ncs_patricia_tree_add(&glnd_cb->glnd_res_tree, &res_info->patnode) != NCSCC_RC_SUCCESS) { LOG_ER("GLND Rsc node add failed"); m_MMGR_FREE_GLND_RESOURCE_INFO(res_info); + TRACE_LEAVE(); return NCSCC_RC_FAILURE; } TRACE("GLND Rsc node add success: resource_id %u", (uint32_t)res_info->resource_id); } + TRACE_LEAVE(); return NCSCC_RC_SUCCESS; } @@ -376,9 +421,11 @@ static uint32_t glnd_restart_resource_no GLND_RESOURCE_INFO *glnd_restart_client_resource_node_add(GLND_CB *glnd_cb, SaLckResourceIdT resource_id) { GLND_RESOURCE_INFO *res_info = NULL; + TRACE_ENTER(); /* check to see if already present */ if ((res_info = glnd_resource_node_find(glnd_cb, resource_id)) 
!= NULL) { + TRACE_LEAVE(); return res_info; } @@ -399,10 +446,12 @@ GLND_RESOURCE_INFO *glnd_restart_client_ if (ncs_patricia_tree_add(&glnd_cb->glnd_res_tree, &res_info->patnode) != NCSCC_RC_SUCCESS) { LOG_ER("GLND Rsc node add failed"); m_MMGR_FREE_GLND_RESOURCE_INFO(res_info); + TRACE_LEAVE(); return NULL; } /* log the Resource Add */ TRACE("GLND Rsc node add success: resource_id %u", (uint32_t)res_info->resource_id); + TRACE_LEAVE(); return res_info; } @@ -423,6 +472,7 @@ static uint32_t glnd_restart_event_add(G GLSV_GLND_EVT glnd_evt; GLND_RES_LOCK_LIST_INFO *lck_list_info = NULL; GLND_RESOURCE_INFO *res_node = NULL; + TRACE_ENTER(); /* check for the resource node */ res_node = glnd_resource_node_find(glnd_cb, evt_info->resource_id); @@ -461,6 +511,7 @@ static uint32_t glnd_restart_event_add(G glnd_evt_backup_queue_add(glnd_cb, &glnd_evt); } + TRACE_LEAVE(); return NCSCC_RC_SUCCESS; } @@ -482,6 +533,8 @@ static uint32_t glnd_restart_build_backu SaAisErrorT rc; uint32_t i; + TRACE_ENTER(); + shm_base_address = glnd_cb->glnd_evt_shm_base_addr; for (i = 0; i < GLND_BACKUP_EVT_CKPT_MAX_SECTIONS; i++) { if (shm_base_address[i].valid == GLND_SHM_INFO_VALID) { @@ -498,5 +551,6 @@ static uint32_t glnd_restart_build_backu } } TRACE_1("GLND restart evt list build success"); + TRACE_LEAVE(); return NCSCC_RC_SUCCESS; } diff --git a/src/smf/smfd/SmfCampaignThread.cc b/src/smf/smfd/SmfCampaignThread.cc --- a/src/smf/smfd/SmfCampaignThread.cc +++ b/src/smf/smfd/SmfCampaignThread.cc @@ -907,12 +907,10 @@ int SmfCampaignThread::handleEvents(void break; } - /* Process the Mail box events */ - if (fds[0].revents & POLLIN) { - /* dispatch MBX events */ - processEvt(); - } - + /* + * Handle NTF events first because processEvt may delete and terminate the + * campaign thread. 
+ */ if (fds[1].revents & POLLIN) { // dispatch NTF events rc = saNtfDispatch(m_ntfHandle, SA_DISPATCH_ALL); @@ -922,6 +920,12 @@ int SmfCampaignThread::handleEvents(void } } + /* Process the Mail box events */ + if (fds[0].revents & POLLIN) { + /* dispatch MBX events */ + processEvt(); + } + m_campaign->updateElapsedTime(); } TRACE_LEAVE(); diff --git a/tools/cluster_sim_uml/opensaf b/tools/cluster_sim_uml/opensaf --- a/tools/cluster_sim_uml/opensaf +++ b/tools/cluster_sim_uml/opensaf @@ -241,7 +241,9 @@ node_start() { # start the uml vm in its own xterm x=$((p < 3 ? 65 : 910)) y=$((24 + (p < 3 ? p - 1 : p - 3) * dy)) - xterm -hold -geometry 140x20+$x+$y -T $hostname -e \ + #xterm -hold -geometry 140x20+$x+$y -T $hostname -e \ + # $UML_DIR/bin/uml_start $p umid=$hostname mem=${OSAF_UML_MEMSIZE} hostname=$hostname shadowroot=$rootdir $$ & + gnome-terminal --hold --geometry 140x20+$x+$y --title $hostname -e \ $UML_DIR/bin/uml_start $p umid=$hostname mem=${OSAF_UML_MEMSIZE} hostname=$hostname shadowroot=$rootdir $$ & echo $! > $PIDFILEDIR/$hostname ------------------------------------------------------------------------------ Check out the vibrant tech community on one of the world's most engaging tech sites, Slashdot.org! http://sdm.link/slashdot _______________________________________________ Opensaf-devel mailing list Opensaf-devel@lists.sourceforge.net https://lists.sourceforge.net/lists/listinfo/opensaf-devel