This patch only works when compatibility: none is specified in the
corosync configuration file.
It allows synchronization to complete between a corosync process without
ais services and a corosync process with ais services (with evt disabled
by hacking the openaisserviceenable.c file).
request for comments welcome
The algorithm that it implements is also attached.
Regards
-steve
Index: exec/syncv2.c
===================================================================
--- exec/syncv2.c (revision 0)
+++ exec/syncv2.c (revision 0)
@@ -0,0 +1,507 @@
+/*
+ * Copyright (c) 2009 Red Hat, Inc.
+ *
+ * All rights reserved.
+ *
+ * Author: Steven Dake ([email protected])
+ *
+ * This software licensed under BSD license, the text of which follows:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * - Neither the name of the Red Hat, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <config.h>
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <sys/ioctl.h>
+#include <netinet/in.h>
+#include <sys/uio.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <errno.h>
+#include <time.h>
+#include <unistd.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+
+#include <corosync/corotypes.h>
+#include <corosync/swab.h>
+#include <corosync/totem/totempg.h>
+#include <corosync/totem/totem.h>
+#include <corosync/lcr/lcr_ifact.h>
+#include <corosync/engine/logsys.h>
+#include <corosync/coroipc_types.h>
+#include "schedwrk.h"
+#include "quorum.h"
+#include "sync.h"
+#include "syncv2.h"
+
+LOGSYS_DECLARE_SUBSYS ("SYNCV2");
+
+/* Message ids multicast within the "syncv2" totempg group. */
+#define MESSAGE_REQ_SYNC_BARRIER 0
+#define MESSAGE_REQ_SYNC_SERVICE_BUILD 1
+
+/* Per-service progress through one synchronization round. */
+enum sync_process_state {
+ INIT,
+ PROCESS,
+ ACTIVATE
+};
+
+/* Overall state machine for this processor's sync round. */
+enum sync_state {
+ SYNC_SERVICELIST_BUILD,
+ SYNC_PROCESS,
+ SYNC_BARRIER
+};
+
+/* One service participating in sync: its id, callbacks, and state. */
+struct service_entry {
+ int service_id;
+ void (*sync_init) (void);
+ void (*sync_abort) (void);
+ int (*sync_process) (void);
+ void (*sync_activate) (void);
+ enum sync_process_state state;
+};
+
+/* One cluster node and whether its message for the current phase arrived. */
+struct processor_entry {
+ int nodeid;
+ int received;
+};
+
+/* Advertises this node's locally registered service ids to the ring. */
+struct req_exec_service_build_message {
+ coroipc_request_header_t header;
+ struct memb_ring_id ring_id;
+ int service_list[128];
+ int service_list_entries;
+};
+
+/* Barrier marker: sender finished the current service's sync. */
+struct req_exec_barrier_message {
+ coroipc_request_header_t header;
+ struct memb_ring_id ring_id;
+};
+
+static enum sync_state my_state = SYNC_BARRIER;
+
+/* Ring id of the configuration this sync round belongs to; messages
+ * from other rings are discarded. */
+static struct memb_ring_id my_ring_id;
+
+/* Index into my_service_list of the service currently synchronizing. */
+static int my_processing_idx = 0;
+
+static hdb_handle_t my_schedwrk_handle;
+
+static struct processor_entry my_processor_list[128];
+
+static int my_processor_list_entries = 0;
+
+/* Union of services known cluster-wide (local list merged with peers'). */
+static struct service_entry my_service_list[128];
+
+static int my_service_list_entries = 0;
+
+/* NOTE(review): only referenced from the #ifdef TODO block below --
+ * confirm whether this is still needed. */
+static const struct memb_ring_id sync_ring_id;
+
+/* Services registered locally at startup via sync_v2_register(). */
+static struct service_entry my_initial_service_list[128];
+
+static int my_initial_service_list_entries;
+
+static int (*sync_callbacks_retrieve) (int sync_id, struct sync_callbacks *callack);
+
+/* Invoked once every service has completed its sync round. */
+static void (*sync_synchronization_completed) (void);
+
+static void sync_deliver_fn (
+ unsigned int nodeid,
+ const void *msg,
+ unsigned int msg_len,
+ int endian_conversion_required);
+
+static void sync_confchg_fn (
+ enum totem_configuration_type configuration_type,
+ const unsigned int *member_list, size_t member_list_entries,
+ const unsigned int *left_list, size_t left_list_entries,
+ const unsigned int *joined_list, size_t joined_list_entries,
+ const struct memb_ring_id *ring_id);
+
+static int schedwrk_processor (const void *context);
+
+static void sync_process_enter (void);
+
+static struct totempg_group sync_group = {
+ .group = "syncv2",
+ .group_len = 6
+};
+
+static hdb_handle_t sync_group_handle;
+
+
+/*
+ * Register the v2 synchronization engine: join the "syncv2" totempg
+ * group and snapshot the locally available per-service sync callbacks
+ * into my_initial_service_list.
+ *
+ * callbacks_retrieve - returns -1 when a service id has no handler
+ * synchronization_completed - called when a full sync round finishes
+ * Returns 0 on success, -1 if group initialize/join fails.
+ */
+int sync_v2_register (
+ int (*callbacks_retrieve) (int sync_id, struct sync_callbacks *callack),
+ void (*synchronization_completed) (void))
+{
+ /* NOTE(review): res is unsigned but compared against -1 below;
+  * works via implicit conversion, but int would be clearer. */
+ unsigned int res;
+ int i;
+ struct sync_callbacks sync_callbacks;
+
+ res = totempg_groups_initialize (
+  &sync_group_handle,
+  sync_deliver_fn,
+  sync_confchg_fn);
+ if (res == -1) {
+  log_printf (LOGSYS_LEVEL_ERROR,
+   "Couldn't initialize groups interface.\n");
+  return (-1);
+ }
+
+ res = totempg_groups_join (
+  sync_group_handle,
+  &sync_group,
+  1);
+ if (res == -1) {
+  log_printf (LOGSYS_LEVEL_ERROR, "Couldn't join group.\n");
+  return (-1);
+ }
+
+ sync_callbacks_retrieve = callbacks_retrieve;
+ sync_synchronization_completed = synchronization_completed;
+ /* NOTE(review): probes service ids 0..63 only, while the service
+  * arrays are sized 128 -- confirm the intended maximum service id. */
+ for (i = 0; i < 64; i++) {
+  res = sync_callbacks_retrieve (i, &sync_callbacks);
+  if (res == -1) {
+   continue;
+  }
+  /* Services without a sync_init do not participate in sync. */
+  if (sync_callbacks.sync_init == NULL) {
+   continue;
+  }
+  my_initial_service_list[my_initial_service_list_entries].state =
+   INIT;
+  my_initial_service_list[my_initial_service_list_entries].service_id = i;
+  my_initial_service_list[my_initial_service_list_entries].sync_init = sync_callbacks.sync_init;
+  my_initial_service_list[my_initial_service_list_entries].sync_process = sync_callbacks.sync_process;
+  my_initial_service_list[my_initial_service_list_entries].sync_abort = sync_callbacks.sync_abort;
+  my_initial_service_list[my_initial_service_list_entries].sync_activate = sync_callbacks.sync_activate;
+  my_initial_service_list_entries += 1;
+ }
+ return (0);
+}
+
+/*
+ * Carried over from the v1 engine but compiled out: this block
+ * references globals (sync_processing, sync_callback_token_handle,
+ * barrier_data_confchg, ...) that are not declared in this file, so it
+ * cannot build as-is.  Kept under #ifdef TODO pending a decision on
+ * primary-component handling in v2.
+ */
+#ifdef TODO
+static void sync_primary_callback_fn (
+ const unsigned int *view_list,
+ size_t view_list_entries,
+ int primary_designated,
+ const struct memb_ring_id *ring_id)
+{
+ int i;
+
+ if (primary_designated) {
+  log_printf (LOGSYS_LEVEL_DEBUG, "This node is within the primary component and will provide service.\n");
+ } else {
+  log_printf (LOGSYS_LEVEL_DEBUG, "This node is within the non-primary component and will NOT provide any services.\n");
+  return;
+ }
+
+ /*
+  * Execute configuration change for synchronization service
+  */
+ sync_processing = 1;
+
+ totempg_callback_token_destroy (&sync_callback_token_handle);
+
+ sync_recovery_index = 0;
+ memset (&barrier_data_confchg, 0, sizeof (barrier_data_confchg));
+ for (i = 0; i < view_list_entries; i++) {
+  barrier_data_confchg[i].nodeid = view_list[i];
+  barrier_data_confchg[i].completed = 0;
+ }
+ memcpy (barrier_data_process, barrier_data_confchg,
+  sizeof (barrier_data_confchg));
+ barrier_data_confchg_entries = view_list_entries;
+ sync_start_init (sync_ring_id);
+}
+#endif
+
+/*
+ * Handle a barrier message from nodeid.  When every processor in
+ * my_processor_list has delivered its barrier, advance to the next
+ * service in my_service_list, or report the whole round complete.
+ * Messages from a ring other than the current one are discarded.
+ */
+static void sync_barrier_handler (unsigned int nodeid, const void *msg)
+{
+ const struct req_exec_barrier_message *req_exec_barrier_message = msg;
+ int i;
+ int barrier_reached = 1;
+
+ /* Stale message from a previous ring: ignore. */
+ if (memcmp (&my_ring_id, &req_exec_barrier_message->ring_id,
+  sizeof (struct memb_ring_id)) != 0) {
+
+  return;
+ }
+ for (i = 0; i < my_processor_list_entries; i++) {
+  if (my_processor_list[i].nodeid == nodeid) {
+   my_processor_list[i].received = 1;
+  }
+ }
+ for (i = 0; i < my_processor_list_entries; i++) {
+  if (my_processor_list[i].received == 0) {
+   barrier_reached = 0;
+  }
+ }
+ if (barrier_reached) {
+  my_processing_idx += 1;
+  if (my_service_list_entries == my_processing_idx) {
+   /* Every service synchronized: signal completion. */
+   sync_synchronization_completed ();
+  } else {
+   /* Start syncing the next service. */
+   sync_process_enter ();
+  }
+ }
+}
+
+/*
+ * No-op callbacks installed for services advertised by peers but not
+ * present locally, so the state machine can step through them while
+ * the peers that do have the service perform the real work.
+ */
+static void dummy_sync_init (void)
+{
+}
+
+static void dummy_sync_abort (void)
+{
+}
+
+/* Returning 0 means "processing complete" (see schedwrk_processor). */
+static int dummy_sync_process (void)
+{
+ return (0);
+}
+
+static void dummy_sync_activate (void)
+{
+}
+
+/*
+ * qsort() comparator ordering service entries by ascending service_id.
+ *
+ * Must return a negative/zero/positive value; the previous form
+ * returned only 0 or 1 (the result of ">"), which is not antisymmetric
+ * and makes the sort order unspecified.
+ */
+static int service_entry_compare (const void *a, const void *b)
+{
+ const struct service_entry *service_entry_a = a;
+ const struct service_entry *service_entry_b = b;
+
+ return ((service_entry_a->service_id > service_entry_b->service_id) -
+  (service_entry_a->service_id < service_entry_b->service_id));
+}
+
+/*
+ * Handle a peer's service_build message: merge any service ids this
+ * node does not have into my_service_list with dummy callbacks, keep
+ * the list sorted by service_id, and enter SYNC_PROCESS once every
+ * processor's service_build has been delivered.
+ */
+static void sync_service_build_handler (unsigned int nodeid, const void *msg)
+{
+ const struct req_exec_service_build_message *req_exec_service_build_message = msg;
+ int i, j;
+ int barrier_reached = 1;
+ int found;
+ int qsort_trigger = 0;
+
+ /* Stale message from a previous ring: ignore. */
+ if (memcmp (&my_ring_id, &req_exec_service_build_message->ring_id,
+  sizeof (struct memb_ring_id)) != 0) {
+
+  return;
+ }
+ /* NOTE(review): no bounds check on my_service_list_entries below;
+  * if the cluster-wide union of service ids exceeded 128 this would
+  * overrun my_service_list -- confirm the id space is bounded. */
+ for (i = 0; i < req_exec_service_build_message->service_list_entries; i++) {
+
+  found = 0;
+  for (j = 0; j < my_service_list_entries; j++) {
+   if (req_exec_service_build_message->service_list[i] ==
+    my_service_list[j].service_id) {
+    found = 1;
+    break;
+   }
+  }
+  if (found == 0) {
+   /* Service exists on the peer but not here: participate
+    * with no-op callbacks so barriers still line up. */
+   my_service_list[my_service_list_entries].state =
+    INIT;
+   my_service_list[my_service_list_entries].service_id =
+    req_exec_service_build_message->service_list[i];
+   my_service_list[my_service_list_entries].sync_init =
+    dummy_sync_init;
+   my_service_list[my_service_list_entries].sync_abort =
+    dummy_sync_abort;
+   my_service_list[my_service_list_entries].sync_process =
+    dummy_sync_process;
+   my_service_list[my_service_list_entries].sync_activate =
+    dummy_sync_activate;
+   my_service_list_entries += 1;
+
+   qsort_trigger = 1;
+  }
+ }
+ /* Keep services ordered so dependencies (lower ids) sync first. */
+ if (qsort_trigger) {
+  qsort (my_service_list, my_service_list_entries,
+   sizeof (struct service_entry), service_entry_compare);
+ }
+ for (i = 0; i < my_processor_list_entries; i++) {
+  if (my_processor_list[i].nodeid == nodeid) {
+   my_processor_list[i].received = 1;
+  }
+ }
+ for (i = 0; i < my_processor_list_entries; i++) {
+  if (my_processor_list[i].received == 0) {
+   barrier_reached = 0;
+  }
+ }
+ if (barrier_reached) {
+  sync_process_enter ();
+ }
+}
+
+/*
+ * Totempg deliver callback for the "syncv2" group: dispatch on the
+ * message id in the coroipc header.
+ *
+ * NOTE(review): endian_conversion_required is ignored, so messages
+ * from an opposite-endian peer are processed unswabbed -- confirm
+ * whether mixed-endian clusters must be supported before merge.
+ */
+static void sync_deliver_fn (
+ unsigned int nodeid,
+ const void *msg,
+ unsigned int msg_len,
+ int endian_conversion_required)
+{
+ /* msg is only read; do not cast away const. */
+ const coroipc_request_header_t *header = msg;
+
+ switch (header->id) {
+ case MESSAGE_REQ_SYNC_BARRIER:
+  sync_barrier_handler (nodeid, msg);
+  break;
+ case MESSAGE_REQ_SYNC_SERVICE_BUILD:
+  sync_service_build_handler (nodeid, msg);
+  break;
+ default:
+  /* Unknown message id: ignore. */
+  break;
+ }
+}
+
+/*
+ * Multicast a barrier message for the current ring to the syncv2
+ * group, announcing this node finished the current service's sync.
+ */
+static void barrier_message_transmit (void)
+{
+ struct iovec iovec;
+ struct req_exec_barrier_message req_exec_barrier_message;
+ int res;
+
+ req_exec_barrier_message.header.size = sizeof (struct req_exec_barrier_message);
+ req_exec_barrier_message.header.id = MESSAGE_REQ_SYNC_BARRIER;
+
+ memcpy (&req_exec_barrier_message.ring_id, &my_ring_id,
+  sizeof (struct memb_ring_id));
+
+ iovec.iov_base = (char *)&req_exec_barrier_message;
+ iovec.iov_len = sizeof (req_exec_barrier_message);
+
+ /* Previously res was assigned and never read (silent failure and a
+  * set-but-unused warning); at minimum log a failed multicast. */
+ res = totempg_groups_mcast_joined (sync_group_handle,
+  &iovec, 1, TOTEMPG_AGREED);
+ if (res == -1) {
+  log_printf (LOGSYS_LEVEL_ERROR,
+   "Unable to multicast sync barrier message\n");
+ }
+}
+
+/*
+ * Fill in the header and ring id of a prepared service_build message
+ * (service_list[] and service_list_entries set by the caller) and
+ * multicast it to the syncv2 group.
+ */
+static void service_build_message_transmit (struct req_exec_service_build_message *service_build_message)
+{
+ struct iovec iovec;
+ int res;
+
+ service_build_message->header.size = sizeof (struct req_exec_service_build_message);
+ service_build_message->header.id = MESSAGE_REQ_SYNC_SERVICE_BUILD;
+
+ memcpy (&service_build_message->ring_id, &my_ring_id,
+  sizeof (struct memb_ring_id));
+
+ iovec.iov_base = (void *)service_build_message;
+ iovec.iov_len = sizeof (struct req_exec_service_build_message);
+
+ /* Previously res was assigned and never read (silent failure and a
+  * set-but-unused warning); at minimum log a failed multicast. */
+ res = totempg_groups_mcast_joined (sync_group_handle,
+  &iovec, 1, TOTEMPG_AGREED);
+ if (res == -1) {
+  log_printf (LOGSYS_LEVEL_ERROR,
+   "Unable to multicast sync service_build message\n");
+ }
+}
+
+/* Enter SYNC_BARRIER: announce completion of the current service. */
+static void sync_barrier_enter (void)
+{
+ my_state = SYNC_BARRIER;
+ barrier_message_transmit ();
+}
+
+/*
+ * Enter SYNC_PROCESS for the service at my_processing_idx: clear the
+ * per-processor received flags (they will track barrier messages for
+ * this service) and schedule the work handler that drives the
+ * service's sync callbacks.
+ */
+static void sync_process_enter (void)
+{
+ int i;
+
+ my_state = SYNC_PROCESS;
+ for (i = 0; i < my_processor_list_entries; i++) {
+  my_processor_list[i].received = 0;
+ }
+ schedwrk_create (&my_schedwrk_handle,
+  schedwrk_processor,
+  NULL);
+}
+
+/*
+ * Enter SYNC_SERVICELIST_BUILD for a new ring: record the membership,
+ * reset the service list to the locally registered services, and
+ * advertise those service ids to the other processors.
+ */
+static void sync_servicelist_build_enter (
+ const unsigned int *member_list,
+ size_t member_list_entries,
+ const struct memb_ring_id *ring_id)
+{
+ struct req_exec_service_build_message service_build;
+ int i;
+
+ my_state = SYNC_SERVICELIST_BUILD;
+ for (i = 0; i < member_list_entries; i++) {
+  my_processor_list[i].nodeid = member_list[i];
+  my_processor_list[i].received = 0;
+ }
+ my_processor_list_entries = member_list_entries;
+
+ my_processing_idx = 0;
+
+ memcpy (my_service_list, my_initial_service_list,
+  sizeof (struct service_entry) *
+  my_initial_service_list_entries);
+ my_service_list_entries = my_initial_service_list_entries;
+
+ /*
+  * Fix: bound the loop by the number of registered services.  The
+  * previous condition (i < my_initial_service_list[i].service_id)
+  * compared the loop index against a service id, truncating or
+  * overrunning the advertised list depending on the id values.
+  */
+ for (i = 0; i < my_initial_service_list_entries; i++) {
+  service_build.service_list[i] =
+   my_initial_service_list[i].service_id;
+ }
+ service_build.service_list_entries = i;
+
+ service_build_message_transmit (&service_build);
+}
+
+/*
+ * Work handler driving the current service through
+ * INIT -> PROCESS -> ACTIVATE, then broadcasting a barrier.
+ *
+ * Returns -1 while the service's sync_process still has work pending
+ * and 0 once activation is done.  NOTE(review): assumes schedwrk
+ * re-invokes the handler on -1 and stops on 0 -- confirm against
+ * schedwrk.h; the attached algorithm text describes the opposite.
+ *
+ * Removed two no-op self-assignments (state = PROCESS inside the
+ * state == PROCESS branch, state = ACTIVATE inside state == ACTIVATE).
+ */
+static int schedwrk_processor (const void *context)
+{
+ int res;
+
+ if (my_service_list[my_processing_idx].state == INIT) {
+  my_service_list[my_processing_idx].state = PROCESS;
+  my_service_list[my_processing_idx].sync_init ();
+ }
+ if (my_service_list[my_processing_idx].state == PROCESS) {
+  res = my_service_list[my_processing_idx].sync_process ();
+  if (res != -1) {
+   my_service_list[my_processing_idx].state = ACTIVATE;
+  } else {
+   return (-1);
+  }
+ }
+ if (my_service_list[my_processing_idx].state == ACTIVATE) {
+  my_service_list[my_processing_idx].sync_activate ();
+  sync_barrier_enter();
+ }
+ return (0);
+}
+
+/*
+ * Totempg configuration-change callback: on a regular (stable)
+ * configuration, abort any in-flight sync, adopt the new ring id and
+ * restart the round from SYNC_SERVICELIST_BUILD.  Transitional
+ * configurations are ignored.
+ */
+static void sync_confchg_fn (
+ enum totem_configuration_type configuration_type,
+ const unsigned int *member_list, size_t member_list_entries,
+ const unsigned int *left_list, size_t left_list_entries,
+ const unsigned int *joined_list, size_t joined_list_entries,
+ const struct memb_ring_id *ring_id)
+{
+ if (configuration_type != TOTEM_CONFIGURATION_REGULAR) {
+  return;
+ }
+ /* A sync was in progress: cancel its work handler and let the
+  * service undo partial state before restarting. */
+ if (my_state == SYNC_PROCESS) {
+  schedwrk_destroy (my_schedwrk_handle);
+  my_service_list[my_processing_idx].sync_abort ();
+ }
+
+ memcpy (&my_ring_id, ring_id, sizeof (struct memb_ring_id));
+
+ sync_servicelist_build_enter (member_list, member_list_entries, ring_id);
+}
Index: exec/syncv2.h
===================================================================
--- exec/syncv2.h (revision 0)
+++ exec/syncv2.h (revision 0)
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2009 Red Hat, Inc.
+ *
+ * All rights reserved.
+ *
+ * Author: Steven Dake ([email protected])
+ *
+ * This software licensed under BSD license, the text of which follows:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * - Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * - Neither the name of the Red Hat, Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from this
+ * software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef SYNCV2_H_DEFINED
+#define SYNCV2_H_DEFINED
+
+#include "sync.h"
+
+/**
+ * Register the v2 synchronization engine.
+ * @param sync_callbacks_retrieve returns the sync callbacks for a
+ *        service id, or -1 when the service has no handler
+ * @param synchronization_completed invoked when a sync round finishes
+ *
+ * @return int 0 OK, -1 on failure to join the totempg group
+ */
+int sync_v2_register (
+ int (*sync_callbacks_retrieve) (
+  int service_id,
+  struct sync_callbacks *callbacks),
+ void (*synchronization_completed) (void));
+
+//int sync_in_process (void);
+
+//int sync_primary_designated (void);
+
+/**
+ * Execute synchronization upon request for the named service
+ * @param name service handler name to synchronize
+ *
+ * @return int 0 OK, error code otherwise
+ */
+//extern int sync_request (const char *name);
+
+#endif /* SYNCV2_H_DEFINED */
Index: exec/main.c
===================================================================
--- exec/main.c (revision 2301)
+++ exec/main.c (working copy)
@@ -76,6 +76,7 @@
#include "totemconfig.h"
#include "main.h"
#include "sync.h"
+#include "syncv2.h"
#include "tlist.h"
#include "timer.h"
#include "util.h"
@@ -226,6 +227,7 @@
static void corosync_sync_completed (void)
{
+	/* Use logsys rather than a raw printf to stdout in the daemon. */
+	log_printf (LOGSYS_LEVEL_DEBUG, "Synchronization completed\n");
}
static int corosync_sync_callbacks_retrieve (int sync_id,
@@ -259,6 +261,27 @@
return (0);
}
+/*
+ * Resolve the sync callbacks for service_id from the loaded service
+ * handlers.  Returns -1 when no handler is loaded for the id, or when
+ * running in v1-compatible mode and the handler does not support v2
+ * sync; otherwise fills *callbacks and returns 0.
+ *
+ * Removed two stray debug printf() calls left over from development.
+ */
+static int corosync_sync_v2_callbacks_retrieve (
+	int service_id,
+	struct sync_callbacks *callbacks)
+{
+	if (ais_service[service_id] == NULL) {
+		return (-1);
+	}
+	if (minimum_sync_mode == CS_SYNC_V1 && ais_service[service_id]->sync_mode != CS_SYNC_V2) {
+		return (-1);
+	}
+
+	callbacks->name = ais_service[service_id]->name;
+	callbacks->sync_init = ais_service[service_id]->sync_init;
+	callbacks->sync_process = ais_service[service_id]->sync_process;
+	callbacks->sync_activate = ais_service[service_id]->sync_activate;
+	callbacks->sync_abort = ais_service[service_id]->sync_abort;
+	return (0);
+}
+
static struct memb_ring_id corosync_ring_id;
static void confchg_fn (
@@ -941,8 +964,10 @@
}
evil_init (api);
- sync_register (corosync_sync_callbacks_retrieve, corosync_sync_completed);
+// sync_register (corosync_sync_callbacks_retrieve, corosync_sync_completed);
+ sync_v2_register (corosync_sync_v2_callbacks_retrieve, corosync_sync_completed);
+
/*
* Drop root privleges to user 'ais'
* TODO: Don't really need full root capabilities;
Index: exec/Makefile.am
===================================================================
--- exec/Makefile.am (revision 2301)
+++ exec/Makefile.am (working copy)
@@ -53,7 +53,7 @@
corosync_SOURCES = main.c util.c sync.c apidef.c service.c \
timer.c totemconfig.c mainconfig.c quorum.c schedwrk.c \
- ../lcr/lcr_ifact.c evil.c
+ ../lcr/lcr_ifact.c evil.c syncv2.c
corosync_LDADD = -ltotem_pg -llogsys -lcoroipcs
corosync_DEPENDENCIES = libtotem_pg.so.$(SONAME) liblogsys.so.$(SONAME) libcoroipcs.so.$(SONAME)
corosync_LDFLAGS = $(OS_DYFLAGS) -L./
@@ -69,7 +69,7 @@
noinst_HEADERS = apidef.h crypto.h mainconfig.h main.h \
quorum.h service.h sync.h timer.h tlist.h totemconfig.h \
totemmrp.h totemnet.h totemrrp.h totemsrp.h util.h \
- version.h vsf.h wthread.h schedwrk.h evil.h
+ version.h vsf.h wthread.h schedwrk.h evil.h syncv2.h
EXTRA_DIST = $(LCRSO_SRC)
Differences from v1 algorithm:
The state machine now builds a list of services that are contained on every
node.
requirements of algorithm:
dependencies must always have a lower service id than dependent services
ie:
2 depends on 1 (allowed)
1 depends on 2 (not allowed)
data structure per node:
enum sync_process_state {
NOTPROCESSED,
INIT,
PROCESS,
ABORT,
ACTIVATE
}
enum sync_v2_state {
SYNC_SERVICELIST_BUILD
SYNC_PROCESS
SYNC_BARRIER
}
struct my_service_entry {
int service_id;
struct process_callbacks;
int state;
};
struct my_processor_entry {
int nodeid;
int received;
};
struct my_processor_list[128];
int my_processor_list_entries = 0;
struct my_service_entry my_service_list[128];
int my_service_list_entries = 0;
On event
--------
configuration change
--------------------
if state == SYNC_PROCESS and configuration change == regular
destroy work handler
execute my_service_list[my_service_idx].sync_abort
if configuration change is regular
enter SYNC_SERVICELIST_BUILD (processor list)
receipt of service_build message
----------------------------------
if service_build.ring_id == my_ring_id
merge service_build entries into my_service_list
if there are new entries, set the callbacks to dummy handlers
set received to 1 for nodeid in my_processor_list
if for all processors in my processor list received is 1
enter SYNC_PROCESS
receipt of barrier message
--------------------------
set my_processor_list received for message's nodeid to 1
if for all processors in my_processor_list received is 1
increment process_idx
if my_service_list_entries == my_processing_idx
call callback to indicate sync done
else
enter SYNC_PROCESS
execution of work handler callback:
-----------------------------------
if my_service_list[my_processing_idx].state == NOTPROCESSED
set sync_process.state = INIT
call my_service_list[processing_idx].sync_init
if my_service_list[my_processing_idx].state == INIT
set sync_process.state = PROCESS
if my_service_list[my_processing_idx].state == PROCESS
call my_service_list[processing_idx].sync_process
if process returns 0 (indicates completed)
set my_service_list[processing_idx].ss.state = ACTIVATE
if my_service_list[my_processing_idx].state == ACTIVATE
call my_service_list[processing_idx].sync_activate
enter SYNC_BARRIER
return -1 (indicating totempg callback should stop)
return 0 (indicating totempg processing of callback should continue)
pseudocode for enter operations:
enter SYNC_SERVICELIST_BUILD (arguments are member list node ids)
set state to SYNC_SERVICELIST_BUILD
reset my_processor_list with argument setting received flag to 0
create my_service_list
set my_processing_idx = 0
create service_build message from my_service_list
transmit service_build message
enter SYNC_PROCESS (no arguments)
set state to SYNC_PROCESS
set received flag for all processors in my_processor_list to 0
create process work handler
enter SYNC_BARRIER (no arguments)
set state to SYNC_BARRIER
transmit barrier message for this ring id
_______________________________________________
Openais mailing list
[email protected]
https://lists.linux-foundation.org/mailman/listinfo/openais