corosync 2.4.5
sync.c
/*
 * Copyright (c) 2009-2012 Red Hat, Inc.
 *
 * All rights reserved.
 *
 * Author: Steven Dake (sdake@redhat.com)
 *
 * This software licensed under BSD license, the text of which follows:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright notice,
 *   this list of conditions and the following disclaimer in the documentation
 *   and/or other materials provided with the distribution.
 * - Neither the name of the MontaVista Software, Inc. nor the names of its
 *   contributors may be used to endorse or promote products derived from this
 *   software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <config.h>

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <sys/ioctl.h>
#include <netinet/in.h>
#include <sys/uio.h>
#include <unistd.h>
#include <fcntl.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>	/* memcpy, memset, strcpy used below */
#include <errno.h>
#include <time.h>
#include <arpa/inet.h>

#include <corosync/corotypes.h>
#include <corosync/swab.h>
#include <corosync/totem/totempg.h>
#include <corosync/totem/totem.h>
#include <corosync/logsys.h>
#include <qb/qbipc_common.h>
#include "schedwrk.h"
#include "quorum.h"
#include "sync.h"
#include "main.h"

LOGSYS_DECLARE_SUBSYS ("SYNC");

#define MESSAGE_REQ_SYNC_BARRIER 0
#define MESSAGE_REQ_SYNC_SERVICE_BUILD 1

enum sync_process_state {
	INIT,
	PROCESS,
	ACTIVATE
};

enum sync_state {
	SYNC_SERVICELIST_BUILD,
	SYNC_PROCESS,
	SYNC_BARRIER
};

struct service_entry {
	int service_id;
	void (*sync_init) (
		const unsigned int *trans_list,
		size_t trans_list_entries,
		const unsigned int *member_list,
		size_t member_list_entries,
		const struct memb_ring_id *ring_id);
	void (*sync_abort) (void);
	int (*sync_process) (void);
	void (*sync_activate) (void);
	enum sync_process_state state;
	char name[128];
};

struct processor_entry {
	int nodeid;
	int received;
};

struct req_exec_service_build_message {
	struct qb_ipc_request_header header __attribute__((aligned(8)));
	struct memb_ring_id ring_id __attribute__((aligned(8)));
	int service_list_entries __attribute__((aligned(8)));
	int service_list[128] __attribute__((aligned(8)));
};

struct req_exec_barrier_message {
	struct qb_ipc_request_header header __attribute__((aligned(8)));
	struct memb_ring_id ring_id __attribute__((aligned(8)));
};

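/*
 * Synchronization proceeds in three phases per ring: SYNC_SERVICELIST_BUILD
 * (the nodes exchange the list of services that need synchronizing),
 * SYNC_PROCESS (each service's sync_process callback is driven in turn), and
 * SYNC_BARRIER (wait until every node has finished the current service
 * before activating it and moving to the next one).
 */
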
static enum sync_state my_state = SYNC_BARRIER;

static struct memb_ring_id my_ring_id;

static int my_processing_idx = 0;

static hdb_handle_t my_schedwrk_handle;

static struct processor_entry my_processor_list[PROCESSOR_COUNT_MAX];

static unsigned int my_member_list[PROCESSOR_COUNT_MAX];

static unsigned int my_trans_list[PROCESSOR_COUNT_MAX];

static size_t my_member_list_entries = 0;

static size_t my_trans_list_entries = 0;

static int my_processor_list_entries = 0;

static struct service_entry my_service_list[SERVICES_COUNT_MAX];

static int my_service_list_entries = 0;

static void (*sync_synchronization_completed) (void);

static void sync_deliver_fn (
	unsigned int nodeid,
	const void *msg,
	unsigned int msg_len,
	int endian_conversion_required);

static int schedwrk_processor (const void *context);

static void sync_process_enter (void);

static void sync_process_call_init (void);

static struct totempg_group sync_group = {
	.group = "sync",
	.group_len = 4
};

static void *sync_group_handle;

static int (*my_sync_callbacks_retrieve) (
	int service_id,
	struct sync_callbacks *callbacks);

int sync_init (
	int (*sync_callbacks_retrieve) (
		int service_id,
		struct sync_callbacks *callbacks),
	void (*synchronization_completed) (void))
{
	unsigned int res;

	res = totempg_groups_initialize (
		&sync_group_handle,
		sync_deliver_fn,
		NULL);
	if (res == -1) {
		log_printf (LOGSYS_LEVEL_ERROR,
			"Couldn't initialize groups interface.");
		return (-1);
	}

	res = totempg_groups_join (
		sync_group_handle,
		&sync_group,
		1);
	if (res == -1) {
		log_printf (LOGSYS_LEVEL_ERROR, "Couldn't join group.");
		return (-1);
	}

	sync_synchronization_completed = synchronization_completed;
	my_sync_callbacks_retrieve = sync_callbacks_retrieve;

	return (0);
}

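/*
 * Handle a barrier message from another node.  Once a barrier message for
 * the current ring has been received from every processor, the service that
 * is currently synchronizing is activated and processing moves on to the
 * next service (or synchronization completes).
 */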
static void sync_barrier_handler (unsigned int nodeid, const void *msg)
{
	const struct req_exec_barrier_message *req_exec_barrier_message = msg;
	int i;
	int barrier_reached = 1;

	if (memcmp (&my_ring_id, &req_exec_barrier_message->ring_id,
		sizeof (struct memb_ring_id)) != 0) {

		log_printf (LOGSYS_LEVEL_DEBUG, "barrier for old ring - discarding");
		return;
	}
	for (i = 0; i < my_processor_list_entries; i++) {
		if (my_processor_list[i].nodeid == nodeid) {
			my_processor_list[i].received = 1;
		}
	}
	for (i = 0; i < my_processor_list_entries; i++) {
		if (my_processor_list[i].received == 0) {
			barrier_reached = 0;
		}
	}
	if (barrier_reached) {
		log_printf (LOGSYS_LEVEL_DEBUG, "Committing synchronization for %s",
			my_service_list[my_processing_idx].name);
		my_service_list[my_processing_idx].state = ACTIVATE;

		if (my_sync_callbacks_retrieve(my_service_list[my_processing_idx].service_id, NULL) != -1) {
			my_service_list[my_processing_idx].sync_activate ();
		}

		my_processing_idx += 1;
		if (my_service_list_entries == my_processing_idx) {
			sync_synchronization_completed ();
		} else {
			sync_process_enter ();
		}
	}
}

static void dummy_sync_abort (void)
{
}

static int dummy_sync_process (void)
{
	return (0);
}

static void dummy_sync_activate (void)
{
}

static int service_entry_compare (const void *a, const void *b)
{
	const struct service_entry *service_entry_a = a;
	const struct service_entry *service_entry_b = b;

	return (service_entry_a->service_id > service_entry_b->service_id);
}

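/*
 * Handle a service list received from another node.  Any service id not
 * already known locally is added with placeholder (dummy) callbacks so that
 * every node walks the same ordered set of services; once every processor
 * has reported its list, the process phase is entered.
 */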
static void sync_service_build_handler (unsigned int nodeid, const void *msg)
{
	const struct req_exec_service_build_message *req_exec_service_build_message = msg;
	int i, j;
	int barrier_reached = 1;
	int found;
	int qsort_trigger = 0;

	if (memcmp (&my_ring_id, &req_exec_service_build_message->ring_id,
		sizeof (struct memb_ring_id)) != 0) {
		log_printf (LOGSYS_LEVEL_DEBUG, "service build for old ring - discarding");
		return;
	}
	for (i = 0; i < req_exec_service_build_message->service_list_entries; i++) {

		found = 0;
		for (j = 0; j < my_service_list_entries; j++) {
			if (req_exec_service_build_message->service_list[i] ==
				my_service_list[j].service_id) {
				found = 1;
				break;
			}
		}
		if (found == 0) {
			my_service_list[my_service_list_entries].state = PROCESS;
			my_service_list[my_service_list_entries].service_id =
				req_exec_service_build_message->service_list[i];
			sprintf (my_service_list[my_service_list_entries].name,
				"Unknown External Service (id = %d)\n",
				req_exec_service_build_message->service_list[i]);
			my_service_list[my_service_list_entries].sync_init =
				NULL;
			my_service_list[my_service_list_entries].sync_abort =
				dummy_sync_abort;
			my_service_list[my_service_list_entries].sync_process =
				dummy_sync_process;
			my_service_list[my_service_list_entries].sync_activate =
				dummy_sync_activate;
			my_service_list_entries += 1;

			qsort_trigger = 1;
		}
	}
	if (qsort_trigger) {
		qsort (my_service_list, my_service_list_entries,
			sizeof (struct service_entry), service_entry_compare);
	}
	for (i = 0; i < my_processor_list_entries; i++) {
		if (my_processor_list[i].nodeid == nodeid) {
			my_processor_list[i].received = 1;
		}
	}
	for (i = 0; i < my_processor_list_entries; i++) {
		if (my_processor_list[i].received == 0) {
			barrier_reached = 0;
		}
	}
	if (barrier_reached) {
		log_printf (LOGSYS_LEVEL_DEBUG, "enter sync process");
		sync_process_enter ();
	}
}

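/*
 * Totem delivery callback for the "sync" group: dispatch an incoming message
 * to the barrier or service-build handler based on its message id.
 */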
static void sync_deliver_fn (
	unsigned int nodeid,
	const void *msg,
	unsigned int msg_len,
	int endian_conversion_required)
{
	struct qb_ipc_request_header *header = (struct qb_ipc_request_header *)msg;

	switch (header->id) {
	case MESSAGE_REQ_SYNC_BARRIER:
		sync_barrier_handler (nodeid, msg);
		break;
	case MESSAGE_REQ_SYNC_SERVICE_BUILD:
		sync_service_build_handler (nodeid, msg);
		break;
	}
}

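/*
 * Multicast a barrier message carrying the current ring id to the sync group.
 */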
static void barrier_message_transmit (void)
{
	struct iovec iovec;
	struct req_exec_barrier_message req_exec_barrier_message;

	req_exec_barrier_message.header.size = sizeof (struct req_exec_barrier_message);
	req_exec_barrier_message.header.id = MESSAGE_REQ_SYNC_BARRIER;

	memcpy (&req_exec_barrier_message.ring_id, &my_ring_id,
		sizeof (struct memb_ring_id));

	iovec.iov_base = (char *)&req_exec_barrier_message;
	iovec.iov_len = sizeof (req_exec_barrier_message);

	(void)totempg_groups_mcast_joined (sync_group_handle,
		&iovec, 1, TOTEMPG_AGREED);
}

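/*
 * Multicast the local service list, tagged with the current ring id, to the
 * sync group.
 */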
static void service_build_message_transmit (struct req_exec_service_build_message *service_build_message)
{
	struct iovec iovec;

	service_build_message->header.size = sizeof (struct req_exec_service_build_message);
	service_build_message->header.id = MESSAGE_REQ_SYNC_SERVICE_BUILD;

	memcpy (&service_build_message->ring_id, &my_ring_id,
		sizeof (struct memb_ring_id));

	iovec.iov_base = (void *)service_build_message;
	iovec.iov_len = sizeof (struct req_exec_service_build_message);

	(void)totempg_groups_mcast_joined (sync_group_handle,
		&iovec, 1, TOTEMPG_AGREED);
}

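/*
 * Enter the barrier phase and announce it to the other nodes.
 */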
static void sync_barrier_enter (void)
{
	my_state = SYNC_BARRIER;
	barrier_message_transmit ();
}

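/*
 * Call sync_init for every locally known service.  The transitional node
 * list passed to the services is first reduced to the intersection of the
 * previous ring's membership and the new membership.
 */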
static void sync_process_call_init (void)
{
	unsigned int old_trans_list[PROCESSOR_COUNT_MAX];
	size_t old_trans_list_entries = 0;
	int o, m;
	int i;

	memcpy (old_trans_list, my_trans_list, my_trans_list_entries *
		sizeof (unsigned int));
	old_trans_list_entries = my_trans_list_entries;

	my_trans_list_entries = 0;
	for (o = 0; o < old_trans_list_entries; o++) {
		for (m = 0; m < my_member_list_entries; m++) {
			if (old_trans_list[o] == my_member_list[m]) {
				my_trans_list[my_trans_list_entries] = my_member_list[m];
				my_trans_list_entries++;
				break;
			}
		}
	}

	for (i = 0; i < my_service_list_entries; i++) {
		if (my_sync_callbacks_retrieve(my_service_list[i].service_id, NULL) != -1) {
			my_service_list[i].sync_init (my_trans_list,
				my_trans_list_entries, my_member_list,
				my_member_list_entries,
				&my_ring_id);
		}
	}
}

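/*
 * Enter the process phase: reset the per-processor barrier state and start
 * the schedwrk worker that drives each service's sync_process callback.
 * If no services need synchronization, completion is reported immediately.
 */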
static void sync_process_enter (void)
{
	int i;

	my_state = SYNC_PROCESS;

	/*
	 * No sync services
	 */
	if (my_service_list_entries == 0) {
		my_state = SYNC_SERVICELIST_BUILD;
		sync_synchronization_completed ();
		return;
	}
	for (i = 0; i < my_processor_list_entries; i++) {
		my_processor_list[i].received = 0;
	}

	schedwrk_create (&my_schedwrk_handle,
		schedwrk_processor,
		NULL);
}

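/*
 * Enter the service-list build phase for a new ring: record the membership,
 * rebuild the list of local services that provide sync callbacks, multicast
 * that list to the other nodes, and call sync_init for the local services.
 */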
static void sync_servicelist_build_enter (
	const unsigned int *member_list,
	size_t member_list_entries,
	const struct memb_ring_id *ring_id)
{
	struct req_exec_service_build_message service_build;
	int i;
	int res;
	struct sync_callbacks sync_callbacks;

	my_state = SYNC_SERVICELIST_BUILD;
	for (i = 0; i < member_list_entries; i++) {
		my_processor_list[i].nodeid = member_list[i];
		my_processor_list[i].received = 0;
	}
	my_processor_list_entries = member_list_entries;

	memcpy (my_member_list, member_list,
		member_list_entries * sizeof (unsigned int));
	my_member_list_entries = member_list_entries;

	my_processing_idx = 0;

	memset(my_service_list, 0, sizeof (struct service_entry) * SERVICES_COUNT_MAX);
	my_service_list_entries = 0;

	for (i = 0; i < SERVICES_COUNT_MAX; i++) {
		res = my_sync_callbacks_retrieve (i, &sync_callbacks);
		if (res == -1) {
			continue;
		}
		if (sync_callbacks.sync_init == NULL) {
			continue;
		}
		my_service_list[my_service_list_entries].state = PROCESS;
		my_service_list[my_service_list_entries].service_id = i;
		strcpy (my_service_list[my_service_list_entries].name,
			sync_callbacks.name);
		my_service_list[my_service_list_entries].sync_init = sync_callbacks.sync_init;
		my_service_list[my_service_list_entries].sync_process = sync_callbacks.sync_process;
		my_service_list[my_service_list_entries].sync_abort = sync_callbacks.sync_abort;
		my_service_list[my_service_list_entries].sync_activate = sync_callbacks.sync_activate;
		my_service_list_entries += 1;
	}

	for (i = 0; i < my_service_list_entries; i++) {
		service_build.service_list[i] =
			my_service_list[i].service_id;
	}
	service_build.service_list_entries = my_service_list_entries;

	service_build_message_transmit (&service_build);

	log_printf (LOGSYS_LEVEL_DEBUG, "call init for locally known services");
	sync_process_call_init ();
}

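/*
 * schedwrk worker: run the current service's sync_process callback.  When it
 * reports completion (returns 0) the barrier phase is entered and the worker
 * finishes; otherwise -1 is returned so the worker is run again.
 */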
static int schedwrk_processor (const void *context)
{
	int res = 0;

	if (my_service_list[my_processing_idx].state == PROCESS) {
		if (my_sync_callbacks_retrieve(my_service_list[my_processing_idx].service_id, NULL) != -1) {
			res = my_service_list[my_processing_idx].sync_process ();
		} else {
			res = 0;
		}
		if (res == 0) {
			sync_barrier_enter();
		} else {
			return (-1);
		}
	}
	return (0);
}

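/*
 * Begin synchronization for a newly formed ring: record the ring id and
 * enter the service-list build phase.
 */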
void sync_start (
	const unsigned int *member_list,
	size_t member_list_entries,
	const struct memb_ring_id *ring_id)
{
	ENTER();
	memcpy (&my_ring_id, ring_id, sizeof (struct memb_ring_id));

	sync_servicelist_build_enter (member_list, member_list_entries,
		ring_id);
}

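/*
 * Record the membership of the previous ring; it becomes the transitional
 * node list passed to the services' sync_init callbacks.
 */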
void sync_save_transitional (
	const unsigned int *member_list,
	size_t member_list_entries,
	const struct memb_ring_id *ring_id)
{
	ENTER();
	memcpy (my_trans_list, member_list, member_list_entries *
		sizeof (unsigned int));
	my_trans_list_entries = member_list_entries;
}

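/*
 * Abort an in-progress synchronization, for example when the membership
 * changes again before the current synchronization has completed.
 */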
void sync_abort (void)
{
	ENTER();
	if (my_state == SYNC_PROCESS) {
		schedwrk_destroy (my_schedwrk_handle);
		if (my_sync_callbacks_retrieve(my_service_list[my_processing_idx].service_id, NULL) != -1) {
			my_service_list[my_processing_idx].sync_abort ();
		}
	}

	/* this will prevent any "old" barrier messages from
	 * causing problems.
	 */
	memset (&my_ring_id, 0, sizeof (struct memb_ring_id));
}