Commit 3c134468 authored by Yossi Gottlieb

Merge remote-tracking branch 'upstream/master'

parents 876f8d7c 9b983183
......@@ -72,9 +72,7 @@ tests_full:
.PHONY: test_virtraft
test_virtraft:
cp src/*.c virtraft/deps/raft/
cp include/*.h virtraft/deps/raft/
cd virtraft; make clean; make; make tests
python3 tests/virtraft2.py --servers 7 -i 20000 --compaction_rate 50 --drop_rate 5 -P 10 --seed 1 -m 3
.PHONY: amalgamation
amalgamation:
......
......@@ -27,12 +27,43 @@ Quality Assurance
We use the following methods to ensure that the library is safe:
* A `simulator <https://github.com/willemt/virtraft>`_ is used to test the Raft invariants on unreliable networks
* A simulator (virtraft2) is used to test the Raft invariants on unreliable networks
* `Fuzzing/property-based-testing <https://github.com/willemt/virtraft/blob/master/tests/test_fuzzer.py>`_ via `Hypothesis <https://github.com/DRMacIver/hypothesis/>`_
* All bugs have regression tests
* Many unit tests
* `Usage <https://github.com/willemt/ticketd>`_
virtraft2
---------
This cluster simulator checks the following:
* Log Matching (servers must have matching logs)
* State Machine Safety (applied entries have the same ID)
* Election Safety (only one valid leader per term)
* Current Index Validity (does the current index have an existing entry?)
* Entry ID Monotonicity (entries aren't appended out of order)
* Committed entry popping (committed entries are not popped from the log)
* Log Accuracy (does the server's log match an independently maintained mirror of the log?)
* Deadlock detection (does the cluster continuously make progress?)
Chaos generated by virtraft2:
* Random bi-directional partitions between nodes
* Message dropping
* Message duplication
* Membership change injection
* Random compactions
Run the simulator using:
.. code-block:: bash
:class: ignore
make test_virtraft
virtraft2 supersedes `virtraft <https://github.com/willemt/virtraft>`_
Single file amalgamation
========================
......@@ -440,7 +471,7 @@ The process works like this:
2. Save the current membership details to the snapshot.
3. Save the finite state machine to the snapshot.
4. End snapshotting with ``raft_end_snapshot``.
5. When the ``send_snapshot`` callback fires, the user must propagate the snapshot to the other node.
5. When the ``send_snapshot`` callback fires, the user must propagate the snapshot to the peer.
6. Once the peer has the snapshot, they call ``raft_begin_load_snapshot``.
7. Peer calls ``raft_add_node`` to add nodes as per the snapshot's membership info.
8. Peer calls ``raft_node_set_voting`` on nodes as per the snapshot's membership info.
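A minimal sketch of the sender's side of these steps (1-4). ``save_member`` and ``save_fsm`` are hypothetical user functions for writing the snapshot; they are not part of the library:

.. code-block:: c
    :class: ignore

    int take_snapshot(raft_server_t *raft)
    {
        /* 1. Begin snapshotting. */
        int e = raft_begin_snapshot(raft);
        if (e != 0)
            return e;

        /* 2. Save the current membership details to the snapshot. */
        int i;
        for (i = 0; i < raft_get_num_nodes(raft); i++)
        {
            raft_node_t* node = raft_get_node_from_idx(raft, i);
            save_member(raft_node_get_id(node), raft_node_is_voting(node));
        }

        /* 3. Save the finite state machine to the snapshot. */
        save_fsm(raft);

        /* 4. End snapshotting; compacted entries are removed from the log. */
        return raft_end_snapshot(raft);
    }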
......
......@@ -12,14 +12,21 @@
#include "raft_types.h"
#define RAFT_ERR_NOT_LEADER -2
#define RAFT_ERR_ONE_VOTING_CHANGE_ONLY -3
#define RAFT_ERR_SHUTDOWN -4
#define RAFT_ERR_NOMEM -5
#define RAFT_ERR_NEEDS_SNAPSHOT -6
#define RAFT_ERR_SNAPSHOT_IN_PROGRESS -7
#define RAFT_ERR_SNAPSHOT_ALREADY_LOADED -8
#define RAFT_ERR_LAST -100
typedef enum {
RAFT_ERR_NOT_LEADER=-2,
RAFT_ERR_ONE_VOTING_CHANGE_ONLY=-3,
RAFT_ERR_SHUTDOWN=-4,
RAFT_ERR_NOMEM=-5,
RAFT_ERR_NEEDS_SNAPSHOT=-6,
RAFT_ERR_SNAPSHOT_IN_PROGRESS=-7,
RAFT_ERR_SNAPSHOT_ALREADY_LOADED=-8,
RAFT_ERR_LAST=-100,
} raft_error_e;
typedef enum {
RAFT_MEMBERSHIP_ADD,
RAFT_MEMBERSHIP_REMOVE,
} raft_membership_e;
#define RAFT_REQUESTVOTE_ERR_GRANTED 1
#define RAFT_REQUESTVOTE_ERR_NOT_GRANTED 0
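A brief usage sketch (not part of this header): the enum values above are returned as error codes by library calls such as raft_recv_entry, assuming an already-prepared entry ety and response r:

int e = raft_recv_entry(raft, &ety, &r);
switch (e)
{
case 0:
    break;                               /* entry accepted by the leader */
case RAFT_ERR_NOT_LEADER:
    /* redirect the client to the current leader */
    break;
case RAFT_ERR_ONE_VOTING_CHANGE_ONLY:
case RAFT_ERR_SNAPSHOT_IN_PROGRESS:
    /* retry the entry once the pending change or snapshot completes */
    break;
case RAFT_ERR_SHUTDOWN:
    /* the library is asking this server to shut down */
    break;
}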
......@@ -64,7 +71,6 @@ typedef enum {
* Removing nodes is a 2 step process: first demote, then remove.
*/
RAFT_LOGTYPE_REMOVE_NODE,
RAFT_LOGTYPE_SNAPSHOT,
/**
* Users can piggyback the entry mechanism by specifying log types that
* are higher than RAFT_LOGTYPE_NUM.
......@@ -229,7 +235,7 @@ typedef int (
msg_appendentries_t* msg
);
/**
/**
* Log compaction
* Callback for telling the user to send a snapshot.
*
......@@ -297,7 +303,7 @@ typedef int (
* @param[in] raft The Raft server making this callback
* @param[in] user_data User data that is passed from Raft server
* @param[in] term Current term
* @param[in] vote The node value dicating we haven't voted for anybody
* @param[in] vote The node value dictating we haven't voted for anybody
* @return 0 on success */
typedef int (
*func_persist_term_f
......@@ -337,6 +343,25 @@ typedef int (
raft_index_t entry_idx
);
/** Callback for being notified of membership changes.
*
* Implementing this callback is optional.
*
* Remove notification happens before the node is about to be removed.
*
* @param[in] raft The Raft server making this callback
* @param[in] user_data User data that is passed from Raft server
* @param[in] node The node that is the subject of this log. Could be NULL.
* @param[in] type The type of membership change */
typedef void (
*func_membership_event_f
) (
raft_server_t* raft,
void *user_data,
raft_node_t *node,
raft_membership_e type
);
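As an illustration (a sketch, not part of this header), an application might implement the callback like this; my_server_t and lookup_peer_udata are hypothetical application helpers:

static void on_membership_event(raft_server_t* raft, void* user_data,
                                raft_node_t* node, raft_membership_e type)
{
    my_server_t* sv = user_data;

    switch (type)
    {
    case RAFT_MEMBERSHIP_ADD:
        /* attach per-node application state, e.g. a connection handle */
        if (node)
            raft_node_set_udata(node, lookup_peer_udata(sv, raft_node_get_id(node)));
        break;
    case RAFT_MEMBERSHIP_REMOVE:
        /* fires before the node is removed; tear down its connection */
        break;
    }
}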
typedef struct
{
/** Callback for sending request vote messages */
......@@ -388,6 +413,8 @@ typedef struct
/** Callback for detecting when a non-voting node has sufficient logs. */
func_node_has_sufficient_logs_f node_has_sufficient_logs;
func_membership_event_f notify_membership_event;
/** Callback for catching debugging log messages
* This callback is optional */
func_log_f log;
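For example (a sketch; the remaining callback fields are assumed to be filled in elsewhere), the new field is registered together with the others via raft_set_callbacks:

raft_cbs_t cbs = {
    .node_has_sufficient_logs = on_node_has_sufficient_logs, /* hypothetical */
    .notify_membership_event  = on_membership_event,         /* sketch above */
    .log                      = NULL,                        /* optional */
};
raft_set_callbacks(raft, &cbs, my_udata);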
......@@ -735,12 +762,12 @@ raft_node_id_t raft_node_get_id(raft_node_t* me_);
* @return get state of type raft_state_e. */
int raft_get_state(raft_server_t* me_);
/** The the most recent log's term
/** Get the most recent log's term
* @return the last log term */
raft_term_t raft_get_last_log_term(raft_server_t* me_);
/** Turn a node into a voting node.
* Voting nodes can take part in elections and in-regards to commiting entries,
* Voting nodes can take part in elections and in-regards to committing entries,
* are counted in majorities. */
void raft_node_set_voting(raft_node_t* node, int voting);
......@@ -829,8 +856,11 @@ raft_index_t raft_get_first_entry_idx(raft_server_t* me_);
* This is usually the result of a snapshot being loaded.
* We need to send an appendentries response.
*
* This will remove all other nodes (not ourself). The user MUST use the
* snapshot to load the new membership information.
*
* @param[in] last_included_term Term of the last log of the snapshot
* @param[in] last_included_index Index of the last log of the snapshot
* @param[in] last_included_index Index of the last log of the snapshot
*
* @return
* 0 on success
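A sketch of the receiving peer's side (steps 6-8 of the snapshotting process described in the README); my_snapshot_t, snapshot_num_members, member_id, member_is_voting and restore_fsm are hypothetical accessors for the user's snapshot format:

int load_received_snapshot(raft_server_t* raft, my_snapshot_t* snap)
{
    int e = raft_begin_load_snapshot(raft, snap->last_term, snap->last_idx);
    if (e != 0)
        return e; /* e.g. RAFT_ERR_SNAPSHOT_ALREADY_LOADED */

    /* All other nodes were removed; rebuild membership from the snapshot. */
    int i;
    for (i = 0; i < snapshot_num_members(snap); i++)
    {
        raft_node_id_t id = member_id(snap, i);
        raft_node_t* n = raft_get_node(raft, id);
        if (!n)
            n = raft_add_node(raft, NULL, id, 0);
        raft_node_set_voting(n, member_is_voting(snap, i));
    }

    restore_fsm(snap);
    return raft_end_load_snapshot(raft);
}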
......@@ -889,4 +919,19 @@ void raft_set_heap_functions(void *(*_malloc)(size_t),
void *(*_realloc)(void *, size_t),
void (*_free)(void *));
/** Confirm that a node's voting status is final
* @param[in] node The node
* @param[in] voting Whether this node's voting status is committed or not */
void raft_node_set_voting_committed(raft_node_t* me_, int voting);
/** Confirm that a node's addition is final
* @param[in] node The node
* @param[in] committed Whether this node's membership is committed or not */
void raft_node_set_addition_committed(raft_node_t* me_, int committed);
/** Check if a voting change is in progress
* @param[in] raft The Raft server
* @return 1 if a voting change is in progress */
int raft_voting_change_is_in_progress(raft_server_t* me_);
#endif /* RAFT_H_ */
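For example (a sketch similar to how the virtraft2 simulator bootstraps a static cluster; NUM_SERVERS, my_id and peer_udata are hypothetical), a fixed configuration that never changes through log entries can be marked committed up front, and a new voting change should only be submitted when none is already in flight:

int i;
for (i = 1; i <= NUM_SERVERS; i++)
{
    raft_node_t* n = raft_add_node(raft, peer_udata(i), i, i == my_id);
    raft_node_set_voting_committed(n, 1);
    raft_node_set_addition_committed(n, 1);
}

if (!raft_voting_change_is_in_progress(raft))
{
    /* safe to submit a RAFT_LOGTYPE_ADD_NODE / RAFT_LOGTYPE_DEMOTE_NODE entry */
}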
......@@ -18,8 +18,8 @@ void log_clear(log_t* me_);
/**
* Add entry to log.
* Don't add entry if we've already added this entry (based off ID)
* Don't add entries with ID=0
* @return 0 if unsucessful; 1 otherwise */
* Don't add entries with ID=0
* @return 0 if unsuccessful; 1 otherwise */
int log_append_entry(log_t* me_, raft_entry_t* c);
/**
......
/**
* Copyright (c) 2013, Willem-Hendrik Thiart
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
* found in the LICENSE file.
*
* @file
* @author Willem Thiart himself@willemthiart.com
......@@ -46,7 +46,7 @@ typedef struct {
/* amount of time left till timeout */
int timeout_elapsed;
raft_node_t* nodes;
int num_nodes;
......
......@@ -80,21 +80,7 @@ int log_load_from_snapshot(log_t *me_, raft_index_t idx, raft_term_t term)
log_private_t* me = (log_private_t*)me_;
log_clear(me_);
raft_entry_t ety;
ety.data.len = 0;
ety.id = 1;
ety.term = term;
ety.type = RAFT_LOGTYPE_SNAPSHOT;
int e = log_append_entry(me_, &ety);
if (e != 0)
{
assert(0);
return e;
}
me->base = idx - 1;
me->base = idx;
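/* The log is left empty after loading a snapshot: the snapshot's last
 * index becomes the log base, so current_idx == base and count == 0. */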
return 0;
}
......@@ -202,10 +188,7 @@ raft_entry_t* log_get_at_idx(log_t* me_, raft_index_t idx)
if (idx == 0)
return NULL;
if (idx <= me->base)
return NULL;
if (me->base + me->count < idx)
if (me->base + me->count < idx || idx <= me->base)
return NULL;
/* idx starts at 1 */
......
......@@ -233,7 +233,7 @@ int raft_periodic(raft_server_t* me_, int msec_since_last_period)
if (me->request_timeout <= me->timeout_elapsed)
raft_send_appendentries_all(me_);
}
else if (me->election_timeout_rand <= me->timeout_elapsed &&
else if (me->election_timeout_rand <= me->timeout_elapsed &&
/* Don't become the leader when building snapshots or bad things will
* happen when we get a client request */
!raft_snapshot_is_in_progress(me_))
......@@ -431,15 +431,25 @@ int raft_recv_appendentries(
{
raft_entry_t* ety = raft_get_entry_from_idx(me_, ae->prev_log_idx);
/* Is a snapshot */
if (ae->prev_log_idx == me->snapshot_last_idx)
{
if (me->snapshot_last_term != ae->prev_log_term)
{
/* Should never happen; something is seriously wrong! */
__log(me_, node, "Snapshot AE prev conflicts with committed entry");
e = RAFT_ERR_SHUTDOWN;
goto out;
}
}
/* 2. Reply false if log doesn't contain an entry at prevLogIndex
whose term matches prevLogTerm (§5.3) */
if (!ety)
else if (!ety)
{
__log(me_, node, "AE no log at prev_idx %d", ae->prev_log_idx);
goto out;
}
if (ety->term != ae->prev_log_term)
else if (ety->term != ae->prev_log_term)
{
__log(me_, node, "AE term doesn't match prev_term (ie. %d vs %d) ci:%d comi:%d lcomi:%d pli:%d",
ety->term, ae->prev_log_term, raft_get_current_idx(me_),
......@@ -528,7 +538,7 @@ static int __should_grant_vote(raft_server_private_t* me, msg_requestvote_t* vr)
if (vr->term < raft_get_current_term((void*)me))
return 0;
/* TODO: if voted for is candiate return 1 (if below checks pass) */
/* TODO: if voted for is candidate return 1 (if below checks pass) */
if (raft_already_voted((void*)me))
return 0;
......@@ -541,10 +551,20 @@ static int __should_grant_vote(raft_server_private_t* me, msg_requestvote_t* vr)
return 1;
raft_entry_t* ety = raft_get_entry_from_idx((void*)me, current_idx);
if (ety->term < vr->last_log_term)
int ety_term;
// TODO: add test
if (ety)
ety_term = ety->term;
else if (!ety && me->snapshot_last_idx == current_idx)
ety_term = me->snapshot_last_term;
else
return 0;
if (ety_term < vr->last_log_term)
return 1;
if (vr->last_log_term == ety->term && current_idx <= vr->last_log_idx)
if (vr->last_log_term == ety_term && current_idx <= vr->last_log_idx)
return 1;
return 0;
......@@ -591,7 +611,7 @@ int raft_recv_requestvote(raft_server_t* me_,
else
r->vote_granted = 0;
/* there must be in an election. */
/* must be in an election. */
me->current_leader = NULL;
me->timeout_elapsed = 0;
......@@ -722,6 +742,7 @@ int raft_recv_entry(raft_server_t* me_,
int e = raft_append_entry(me_, ety);
if (0 != e)
return e;
for (i = 0; i < me->num_nodes; i++)
{
raft_node_t* node = me->nodes[i];
......@@ -748,6 +769,7 @@ int raft_recv_entry(raft_server_t* me_,
r->idx = raft_get_current_idx(me_);
r->term = me->current_term;
/* FIXME: is this required if raft_append_entry does this too? */
if (raft_entry_is_voting_cfg_change(ety))
me->voting_cfg_change_log_idx = raft_get_current_idx(me_);
......@@ -892,11 +914,16 @@ int raft_send_appendentries(raft_server_t* me_, raft_node_t* node)
if (1 < next_idx)
{
raft_entry_t* prev_ety = raft_get_entry_from_idx(me_, next_idx - 1);
ae.prev_log_idx = next_idx - 1;
if (prev_ety)
ae.prev_log_term = prev_ety->term;
else
if (!prev_ety)
{
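/* The entry preceding next_idx has been compacted into a snapshot, so
 * advertise the snapshot's last index and term as prevLogIndex/prevLogTerm. */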
ae.prev_log_idx = me->snapshot_last_idx;
ae.prev_log_term = me->snapshot_last_term;
}
else
{
ae.prev_log_idx = next_idx - 1;
ae.prev_log_term = prev_ety->term;
}
}
__log(me_, node, "sending appendentries node: ci:%d comi:%d t:%d lc:%d pli:%d plt:%d",
......@@ -961,7 +988,12 @@ raft_node_t* raft_add_node(raft_server_t* me_, void* udata, raft_node_id_t id, i
if (is_self)
me->node = me->nodes[me->num_nodes - 1];
return me->nodes[me->num_nodes - 1];
node = me->nodes[me->num_nodes - 1];
if (me->cb.notify_membership_event)
me->cb.notify_membership_event(me_, raft_get_udata(me_), node, RAFT_MEMBERSHIP_ADD);
return node;
}
raft_node_t* raft_add_non_voting_node(raft_server_t* me_, void* udata, raft_node_id_t id, int is_self)
......@@ -981,6 +1013,9 @@ void raft_remove_node(raft_server_t* me_, raft_node_t* node)
{
raft_server_private_t* me = (raft_server_private_t*)me_;
if (me->cb.notify_membership_event)
me->cb.notify_membership_event(me_, raft_get_udata(me_), node, RAFT_MEMBERSHIP_REMOVE);
assert(node);
int i, found = 0;
......@@ -1104,16 +1139,14 @@ void raft_offer_log(raft_server_t* me_, raft_entry_t* ety, const raft_index_t id
}
else if (!node)
{
raft_node_t* node = raft_add_non_voting_node(me_, NULL, node_id, is_self);
node = raft_add_non_voting_node(me_, NULL, node_id, is_self);
assert(node);
}
}
break;
case RAFT_LOGTYPE_ADD_NODE:
if (!node) {
node = raft_add_node(me_, NULL, node_id, is_self);
}
node = raft_add_node(me_, NULL, node_id, is_self);
assert(node);
assert(raft_node_is_voting(node));
break;
......@@ -1246,7 +1279,7 @@ int raft_begin_snapshot(raft_server_t *me_)
me->snapshot_in_progress = 1;
__log(me_, NULL,
"begin snapshot sli:%d slt:%d slogs:%d\n",
"begin snapshot sli:%d slt:%d slogs:%d\n",
me->snapshot_last_idx,
me->snapshot_last_term,
raft_get_num_snapshottable_logs(me_));
......@@ -1261,6 +1294,9 @@ int raft_end_snapshot(raft_server_t *me_)
if (!me->snapshot_in_progress || me->snapshot_last_idx == 0)
return -1;
assert(raft_get_num_snapshottable_logs(me_) != 0);
assert(me->snapshot_last_idx == raft_get_commit_idx(me_));
/* If needed, remove compacted logs */
raft_index_t i = log_get_base(me->log) + 1, end = raft_get_commit_idx(me_);
for (; i <= end; i++)
......@@ -1299,6 +1335,8 @@ int raft_end_snapshot(raft_server_t *me_)
}
}
assert(raft_get_log_count(me_) == 1);
return 0;
}
......@@ -1354,7 +1392,7 @@ int raft_begin_load_snapshot(
me->num_nodes = 1;
__log(me_, NULL,
"loaded snapshot sli:%d slt:%d slogs:%d\n",
"loaded snapshot sli:%d slt:%d slogs:%d\n",
me->snapshot_last_idx,
me->snapshot_last_term,
raft_get_num_snapshottable_logs(me_));
......
......@@ -151,7 +151,7 @@ void CuFail_Line(CuTest* tc, const char* file, int line, const char* message2, c
CuString string;
CuStringInit(&string);
if (message2 != NULL)
if (message2 != NULL)
{
CuStringAppend(&string, message2);
CuStringAppend(&string, ": ");
......@@ -166,7 +166,7 @@ void CuAssert_Line(CuTest* tc, const char* file, int line, const char* message,
CuFail_Line(tc, file, line, NULL, message);
}
void CuAssertStrEquals_LineMsg(CuTest* tc, const char* file, int line, const char* message,
void CuAssertStrEquals_LineMsg(CuTest* tc, const char* file, int line, const char* message,
const char* expected, const char* actual)
{
CuString string;
......@@ -178,7 +178,7 @@ void CuAssertStrEquals_LineMsg(CuTest* tc, const char* file, int line, const cha
}
CuStringInit(&string);
if (message != NULL)
if (message != NULL)
{
CuStringAppend(&string, message);
CuStringAppend(&string, ": ");
......@@ -191,7 +191,7 @@ void CuAssertStrEquals_LineMsg(CuTest* tc, const char* file, int line, const cha
CuFailInternal(tc, file, line, &string);
}
void CuAssertIntEquals_LineMsg(CuTest* tc, const char* file, int line, const char* message,
void CuAssertIntEquals_LineMsg(CuTest* tc, const char* file, int line, const char* message,
int expected, int actual)
{
char buf[STRING_MAX];
......@@ -200,7 +200,7 @@ void CuAssertIntEquals_LineMsg(CuTest* tc, const char* file, int line, const cha
CuFail_Line(tc, file, line, message, buf);
}
void CuAssertDblEquals_LineMsg(CuTest* tc, const char* file, int line, const char* message,
void CuAssertDblEquals_LineMsg(CuTest* tc, const char* file, int line, const char* message,
double expected, double actual, double delta)
{
char buf[STRING_MAX];
......@@ -209,7 +209,7 @@ void CuAssertDblEquals_LineMsg(CuTest* tc, const char* file, int line, const cha
CuFail_Line(tc, file, line, message, buf);
}
void CuAssertPtrEquals_LineMsg(CuTest* tc, const char* file, int line, const char* message,
void CuAssertPtrEquals_LineMsg(CuTest* tc, const char* file, int line, const char* message,
void* expected, void* actual)
{
char buf[STRING_MAX];
......
......@@ -54,17 +54,17 @@ void CuTestRun(CuTest* tc);
/* Internal versions of assert functions -- use the public versions */
void CuFail_Line(CuTest* tc, const char* file, int line, const char* message2, const char* message);
void CuAssert_Line(CuTest* tc, const char* file, int line, const char* message, int condition);
void CuAssertStrEquals_LineMsg(CuTest* tc,
const char* file, int line, const char* message,
void CuAssertStrEquals_LineMsg(CuTest* tc,
const char* file, int line, const char* message,
const char* expected, const char* actual);
void CuAssertIntEquals_LineMsg(CuTest* tc,
const char* file, int line, const char* message,
void CuAssertIntEquals_LineMsg(CuTest* tc,
const char* file, int line, const char* message,
int expected, int actual);
void CuAssertDblEquals_LineMsg(CuTest* tc,
const char* file, int line, const char* message,
void CuAssertDblEquals_LineMsg(CuTest* tc,
const char* file, int line, const char* message,
double expected, double actual, double delta);
void CuAssertPtrEquals_LineMsg(CuTest* tc,
const char* file, int line, const char* message,
void CuAssertPtrEquals_LineMsg(CuTest* tc,
const char* file, int line, const char* message,
void* expected, void* actual);
/* public assert functions */
......
import cffi
import subprocess
ffi = cffi.FFI()
ffi.set_source(
"tests",
"""
""",
sources="""
src/raft_log.c
src/raft_server.c
src/raft_server_properties.c
src/raft_node.c
""".split(),
include_dirs=["include"],
)
library = ffi.compile()
ffi = cffi.FFI()
lib = ffi.dlopen(library)
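# Run the headers through the C preprocessor so that ffi.cdef() receives
# fully expanded declarations (cdef cannot process #include or #define).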
def load(fname):
return '\n'.join(
[line for line in subprocess.check_output(
["gcc", "-E", fname]).decode('utf-8').split('\n')])
ffi.cdef('void *malloc(size_t __size);')
ffi.cdef(load('include/raft.h'))
ffi.cdef(load('include/raft_log.h'))
......@@ -399,10 +399,7 @@ void TestLog_load_from_snapshot(CuTest * tc)
CuAssertIntEquals(tc, 0, log_get_current_idx(l));
CuAssertIntEquals(tc, 0, log_load_from_snapshot(l, 10, 5));
CuAssertIntEquals(tc, 10, log_get_current_idx(l));
/* this is just a marker
* it should never be sent to any nodes because it is part of a snapshot */
CuAssertIntEquals(tc, 1, log_count(l));
CuAssertIntEquals(tc, 0, log_count(l));
}
void TestLog_load_from_snapshot_clears_log(CuTest * tc)
......@@ -422,7 +419,7 @@ void TestLog_load_from_snapshot_clears_log(CuTest * tc)
CuAssertIntEquals(tc, 2, log_get_current_idx(l));
CuAssertIntEquals(tc, 0, log_load_from_snapshot(l, 10, 5));
CuAssertIntEquals(tc, 1, log_count(l));
CuAssertIntEquals(tc, 0, log_count(l));
CuAssertIntEquals(tc, 10, log_get_current_idx(l));
}
......
......@@ -82,14 +82,6 @@ static int __raft_log_offer(raft_server_t* raft,
raft_entry_t *entry,
raft_index_t entry_idx)
{
switch (entry->type) {
case RAFT_LOGTYPE_ADD_NONVOTING_NODE:
raft_add_non_voting_node(raft, NULL, atoi(entry->data.buf), 0);
break;
case RAFT_LOGTYPE_ADD_NODE:
raft_add_node(raft, NULL, atoi(entry->data.buf), 0);
break;
}
return 0;
}
......@@ -1471,7 +1463,7 @@ void TestRaft_follower_recv_appendentries_delete_entries_if_conflict_with_new_en
char* strs[] = {"111", "222", "333"};
raft_entry_t *ety_appended;
__create_mock_entries_for_conflict_tests(tc, r, strs);
CuAssertIntEquals(tc, 3, raft_get_log_count(r));
......@@ -1967,7 +1959,7 @@ void TestRaft_follower_recv_appendentries_heartbeat_does_not_overwrite_logs(
raft_recv_appendentries(r, raft_get_node(r, 2), &ae, &aer);
/* receive a heartbeat
* NOTE: the leader hasn't received the response to the last AE so it can
* NOTE: the leader hasn't received the response to the last AE so it can
* only assume prev_Log_idx is still 1 */
memset(&ae, 0, sizeof(msg_appendentries_t));
ae.term = 1;
......@@ -3963,10 +3955,10 @@ void TestRaft_leader_recv_appendentries_response_set_has_sufficient_logs_after_v
.type = RAFT_LOGTYPE_ADD_NONVOTING_NODE
};
msg_entry_response_t etyr;
raft_recv_entry(r, &ety, &etyr);
CuAssertIntEquals(tc, 0, raft_recv_entry(r, &ety, &etyr));
ety.id++;
ety.data.buf = "3";
raft_recv_entry(r, &ety, &etyr);
CuAssertIntEquals(tc, 0, raft_recv_entry(r, &ety, &etyr));
msg_appendentries_response_t aer = {
.term = 1, .success = 1, .current_idx = 2, .first_idx = 0
......@@ -3999,4 +3991,3 @@ void TestRaft_leader_recv_appendentries_response_set_has_sufficient_logs_after_v
raft_recv_appendentries_response(r, raft_get_node(r, 2), &aer);
CuAssertIntEquals(tc, 2, has_sufficient_logs_flag);
}
......@@ -352,7 +352,7 @@ void TestRaft_follower_load_from_snapshot(CuTest * tc)
CuAssertIntEquals(tc, 0, raft_get_log_count(r));
CuAssertIntEquals(tc, 0, raft_begin_load_snapshot(r, 5, 5));
CuAssertIntEquals(tc, 0, raft_end_load_snapshot(r));
CuAssertIntEquals(tc, 1, raft_get_log_count(r));
CuAssertIntEquals(tc, 0, raft_get_log_count(r));
CuAssertIntEquals(tc, 0, raft_get_num_snapshottable_logs(r));
CuAssertIntEquals(tc, 5, raft_get_commit_idx(r));
CuAssertIntEquals(tc, 5, raft_get_last_applied_idx(r));
......@@ -408,7 +408,7 @@ void TestRaft_follower_load_from_snapshot_fails_if_already_loaded(CuTest * tc)
CuAssertIntEquals(tc, 0, raft_get_log_count(r));
CuAssertIntEquals(tc, 0, raft_begin_load_snapshot(r, 5, 5));
CuAssertIntEquals(tc, 0, raft_end_load_snapshot(r));
CuAssertIntEquals(tc, 1, raft_get_log_count(r));
CuAssertIntEquals(tc, 0, raft_get_log_count(r));
CuAssertIntEquals(tc, 0, raft_get_num_snapshottable_logs(r));
CuAssertIntEquals(tc, 5, raft_get_commit_idx(r));
CuAssertIntEquals(tc, 5, raft_get_last_applied_idx(r));
......@@ -527,7 +527,7 @@ void TestRaft_leader_sends_appendentries_when_node_next_index_was_compacted(CuTe
raft_set_current_term(r, 2);
CuAssertIntEquals(tc, 0, raft_send_appendentries(r, node));
CuAssertIntEquals(tc, 2, ae.term);
CuAssertIntEquals(tc, 2, ae.prev_log_idx);
CuAssertIntEquals(tc, 3, ae.prev_log_idx);
CuAssertIntEquals(tc, 2, ae.prev_log_term);
}
......@@ -568,3 +568,69 @@ void TestRaft_recv_entry_fails_if_snapshot_in_progress(CuTest* tc)
ety.type = RAFT_LOGTYPE_ADD_NODE;
CuAssertIntEquals(tc, RAFT_ERR_SNAPSHOT_IN_PROGRESS, raft_recv_entry(r, &ety, &cr));
}
void TestRaft_follower_recv_appendentries_is_successful_when_previous_log_idx_equals_snapshot_last_idx(
CuTest * tc)
{
raft_cbs_t funcs = {
.persist_term = __raft_persist_term,
};
void *r = raft_new();
raft_set_callbacks(r, &funcs, NULL);
raft_add_node(r, NULL, 1, 1);
raft_add_node(r, NULL, 2, 0);
CuAssertIntEquals(tc, 0, raft_begin_load_snapshot(r, 2, 2));
CuAssertIntEquals(tc, 0, raft_end_load_snapshot(r));
msg_appendentries_t ae;
msg_appendentries_response_t aer;
memset(&ae, 0, sizeof(msg_appendentries_t));
ae.term = 3;
ae.prev_log_idx = 2;
ae.prev_log_term = 2;
/* include entries */
msg_entry_t e[5];
memset(&e, 0, sizeof(msg_entry_t) * 4);
e[0].term = 3;
e[0].id = 3;
ae.entries = e;
ae.n_entries = 1;
CuAssertIntEquals(tc, 0, raft_recv_appendentries(r, raft_get_node(r, 2), &ae, &aer));
CuAssertIntEquals(tc, 1, aer.success);
}
void TestRaft_leader_sends_appendentries_with_correct_prev_log_idx_when_snapshotted(
CuTest * tc)
{
raft_cbs_t funcs = {
.send_appendentries = sender_appendentries,
.log = NULL
};
void *sender = sender_new(NULL);
void *r = raft_new();
raft_set_callbacks(r, &funcs, sender);
CuAssertTrue(tc, NULL != raft_add_node(r, NULL, 1, 1));
CuAssertTrue(tc, NULL != raft_add_node(r, NULL, 2, 0));
CuAssertIntEquals(tc, 0, raft_begin_load_snapshot(r, 2, 4));
CuAssertIntEquals(tc, 0, raft_end_load_snapshot(r));
/* i'm leader */
raft_set_state(r, RAFT_STATE_LEADER);
raft_node_t* p = raft_get_node_from_idx(r, 1);
CuAssertTrue(tc, NULL != p);
raft_node_set_next_idx(p, 4);
/* receive appendentries messages */
raft_send_appendentries(r, p);
msg_appendentries_t* ae = sender_poll_msg_data(sender);
CuAssertTrue(tc, NULL != ae);
CuAssertIntEquals(tc, 2, ae->prev_log_term);
CuAssertIntEquals(tc, 4, ae->prev_log_idx);
}
#!/usr/bin/env python
"""
virtraft2 - Simulate a raft network
Some quality checks we do:
- Log Matching (servers must have matching logs)
- State Machine Safety (applied entries have the same ID)
- Election Safety (only one valid leader per term)
- Current Index Validity (does current index have an existing entry?)
- Entry ID Monotonicity (entries aren't appended out of order)
- Committed entry popping (committed entries are not popped from the log)
- Log Accuracy (does the server's log match an independently maintained mirror of the log?)
- Deadlock detection (does the cluster continuously make progress?)
Some chaos we generate:
- Random bi-directional partitions between nodes
- Message dropping
- Message duplication
- Message re-ordering
Usage:
virtraft --servers SERVERS [-d RATE] [-D RATE] [-c RATE] [-C RATE] [-m RATE]
[-P RATE] [-s SEED] [-i ITERS] [-p] [--tsv]
[-q] [-v] [-l LEVEL]
virtraft --version
virtraft --help
Options:
-n --servers SERVERS Number of servers
-d --drop_rate RATE Message drop rate 0-100 [default: 0]
-D --dupe_rate RATE Message duplication rate 0-100 [default: 0]
-c --client_rate RATE Rate entries are received from the client 0-100
[default: 100]
-C --compaction_rate RATE Rate that log compactions occur 0-100 [default: 0]
-m --member_rate RATE Membership change rate 0-100000 [default: 0]
-P --partition_rate RATE Rate that partitions occur or are healed 0-100
[default: 0]
-p --no_random_period Don't use a random period
-s --seed SEED The simulation's seed [default: 0]
-q --quiet No output at end of run
-i --iterations ITERS Number of iterations before the simulation ends
[default: 1000]
--tsv Output node status tab separated values at exit
-v --verbose Show debug logs
-l --log_level LEVEL Set log level
-V --version Display version.
-h --help Prints a short usage summary.
Examples:
Output a node status table:
virtraft --servers 3 --iterations 1000 --tsv | column -t
"""
import collections
import colorama
import coloredlogs
import docopt
import logging
import random
import re
import sys
import terminaltables
from raft_cffi import ffi, lib
NODE_DISCONNECTED = 0
NODE_CONNECTING = 1
NODE_CONNECTED = 2
NODE_DISCONNECTING = 3
class ServerDoesNotExist(Exception):
pass
def logtype2str(log_type):
if log_type == lib.RAFT_LOGTYPE_NORMAL:
return 'normal'
elif log_type == lib.RAFT_LOGTYPE_DEMOTE_NODE:
return 'demote'
elif log_type == lib.RAFT_LOGTYPE_REMOVE_NODE:
return 'remove'
elif log_type == lib.RAFT_LOGTYPE_ADD_NONVOTING_NODE:
return 'add_nonvoting'
elif log_type == lib.RAFT_LOGTYPE_ADD_NODE:
return 'add'
else:
return 'unknown'
def state2str(state):
if state == lib.RAFT_STATE_LEADER:
return colorama.Fore.GREEN + 'leader' + colorama.Style.RESET_ALL
elif state == lib.RAFT_STATE_CANDIDATE:
return 'candidate'
elif state == lib.RAFT_STATE_FOLLOWER:
return 'follower'
else:
return 'unknown'
def connectstatus2str(connectstatus):
return {
NODE_DISCONNECTED: colorama.Fore.RED + 'DISCONNECTED' + colorama.Style.RESET_ALL,
NODE_CONNECTING: 'CONNECTING',
NODE_CONNECTED: colorama.Fore.GREEN + 'CONNECTED' + colorama.Style.RESET_ALL,
NODE_DISCONNECTING: colorama.Fore.YELLOW + 'DISCONNECTING' + colorama.Style.RESET_ALL,
}[connectstatus]
def err2str(err):
return {
lib.RAFT_ERR_NOT_LEADER: 'RAFT_ERR_NOT_LEADER',
lib.RAFT_ERR_ONE_VOTING_CHANGE_ONLY: 'RAFT_ERR_ONE_VOTING_CHANGE_ONLY',
lib.RAFT_ERR_SHUTDOWN: 'RAFT_ERR_SHUTDOWN',
lib.RAFT_ERR_NOMEM: 'RAFT_ERR_NOMEM',
lib.RAFT_ERR_NEEDS_SNAPSHOT: 'RAFT_ERR_NEEDS_SNAPSHOT',
lib.RAFT_ERR_SNAPSHOT_IN_PROGRESS: 'RAFT_ERR_SNAPSHOT_IN_PROGRESS',
lib.RAFT_ERR_SNAPSHOT_ALREADY_LOADED: 'RAFT_ERR_SNAPSHOT_ALREADY_LOADED',
lib.RAFT_ERR_LAST: 'RAFT_ERR_LAST',
}[err]
class ChangeRaftEntry(object):
def __init__(self, node_id):
self.node_id = node_id
class SetRaftEntry(object):
def __init__(self, key, val):
self.key = key
self.val = val
class RaftEntry(object):
def __init__(self, entry):
self.term = entry.term
self.id = entry.id
class Snapshot(object):
def __init__(self):
self.members = []
SnapshotMember = collections.namedtuple('SnapshotMember', ['id', 'voting'], verbose=False)
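# Module-level cffi callback trampolines: each recovers the RaftServer
# instance from its udata handle and forwards the call on to that server.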
def raft_send_requestvote(raft, udata, node, msg):
server = ffi.from_handle(udata)
dst_server = ffi.from_handle(lib.raft_node_get_udata(node))
server.network.enqueue_msg(msg, server, dst_server)
return 0
def raft_send_appendentries(raft, udata, node, msg):
server = ffi.from_handle(udata)
assert node
dst_server = ffi.from_handle(lib.raft_node_get_udata(node))
server.network.enqueue_msg(msg, server, dst_server)
# Collect statistics
if server.network.max_entries_in_ae < msg.n_entries:
server.network.max_entries_in_ae = msg.n_entries
return 0
def raft_send_snapshot(raft, udata, node):
return ffi.from_handle(udata).send_snapshot(node)
def raft_applylog(raft, udata, ety, idx):
try:
return ffi.from_handle(udata).entry_apply(ety, idx)
except:
return lib.RAFT_ERR_SHUTDOWN
def raft_persist_vote(raft, udata, voted_for):
return ffi.from_handle(udata).persist_vote(voted_for)
def raft_persist_term(raft, udata, term, vote):
return ffi.from_handle(udata).persist_term(term, vote)
def raft_logentry_offer(raft, udata, ety, ety_idx):
return ffi.from_handle(udata).entry_append(ety, ety_idx)
def raft_logentry_poll(raft, udata, ety, ety_idx):
return ffi.from_handle(udata).entry_poll(ety, ety_idx)
def raft_logentry_pop(raft, udata, ety, ety_idx):
return ffi.from_handle(udata).entry_pop(ety, ety_idx)
def raft_logentry_get_node_id(raft, udata, ety, ety_idx):
change_entry = ffi.from_handle(ety.data.buf)
assert isinstance(change_entry, ChangeRaftEntry)
return change_entry.node_id
def raft_node_has_sufficient_logs(raft, udata, node):
return ffi.from_handle(udata).node_has_sufficient_entries(node)
def raft_notify_membership_event(raft, udata, node, event_type):
return ffi.from_handle(udata).notify_membership_event(node, event_type)
def raft_log(raft, node, udata, buf):
server = ffi.from_handle(lib.raft_get_udata(raft))
# print(server.id, ffi.string(buf).decode('utf8'))
if node != ffi.NULL:
node = ffi.from_handle(lib.raft_node_get_udata(node))
# if server.id in [1] or (node and node.id in [1]):
logging.info('{0}> {1}:{2}: {3}'.format(
server.network.iteration,
server.id,
node.id if node else '',
ffi.string(buf).decode('utf8'),
))
class Message(object):
def __init__(self, msg, sendor, sendee):
self.data = msg
self.sendor = sendor
self.sendee = sendee
class Network(object):
def __init__(self, seed=0):
self.servers = []
self.messages = []
self.drop_rate = 0
self.dupe_rate = 0
self.partition_rate = 0
self.iteration = 0
self.leader = None
self.ety_id = 0
self.entries = []
self.random = random.Random(seed)
self.partitions = []
self.no_random_period = False
self.server_id = 0
# Information
self.max_entries_in_ae = 0
self.leadership_changes = 0
self.log_pops = 0
self.num_unique_nodes = 0
self.num_membership_changes = 0
self.num_compactions = 0
self.latest_applied_log_idx = 0
def add_server(self, server):
self.server_id += 1
server.id = self.server_id
assert server.id not in set([s.id for s in self.servers])
server.set_network(self)
self.servers.append(server)
def push_set_entry(self, k, v):
for sv in self.active_servers:
if lib.raft_is_leader(sv.raft):
ety = ffi.new('msg_entry_t*')
ety.term = 0
ety.id = self.new_entry_id()
ety.type = lib.RAFT_LOGTYPE_NORMAL
change = ffi.new_handle(SetRaftEntry(k, v))
ety.data.buf = change
ety.data.len = ffi.sizeof(ety.data.buf)
self.entries.append((ety, change))
e = sv.recv_entry(ety)
assert e == 0
break
def id2server(self, id):
for server in self.servers:
if server.id == id:
return server
# if lib.raft_get_nodeid(server.raft) == id:
# return server
raise ServerDoesNotExist('Could not find server: {}'.format(id))
def add_partition(self):
if len(self.active_servers) <= 1:
return
nodes = list(self.active_servers)
self.random.shuffle(nodes)
n1 = nodes.pop()
n2 = nodes.pop()
self.partitions.append((n1, n2))
def remove_partition(self):
self.random.shuffle(self.partitions)
self.partitions.pop()
def periodic(self):
if self.random.randint(1, 100) < self.member_rate:
if 20 < self.random.randint(1, 100):
self.add_member()
else:
self.remove_member()
if self.random.randint(1, 100) < self.partition_rate:
self.add_partition()
if self.partitions and self.random.randint(1, 100) < self.partition_rate:
self.remove_partition()
if self.random.randint(1, 100) < self.client_rate:
self.push_set_entry(self.random.randint(1, 10), self.random.randint(1, 10))
for server in self.active_servers:
if self.no_random_period:
server.periodic(100)
else:
server.periodic(self.random.randint(1, 100))
# Deadlock detection
if self.latest_applied_log_idx != 0 and self.latest_applied_log_iteration + 5000 < self.iteration:
logging.error("deadlock detected iteration:{0} appliedidx:{1}\n".format(
self.latest_applied_log_iteration,
self.latest_applied_log_idx,
))
self.diagnotistic_info()
sys.exit(1)
# Count leadership changes
leader_node = lib.raft_get_current_leader_node(self.active_servers[0].raft)
if leader_node:
leader = ffi.from_handle(lib.raft_node_get_udata(leader_node))
if self.leader is not leader:
self.leadership_changes += 1
self.leader = leader
def enqueue_msg(self, msg, sendor, sendee):
# Drop message if this edge is partitioned
for partition in self.partitions:
# Partitions are in one direction
if partition[0] is sendor and partition[1] is sendee:
return
if self.random.randint(1, 100) < self.drop_rate:
return
while self.random.randint(1, 100) < self.dupe_rate:
self._enqueue_msg(msg, sendor, sendee)
self._enqueue_msg(msg, sendor, sendee)
def _enqueue_msg(self, msg, sendor, sendee):
msg_type = ffi.getctype(ffi.typeof(msg))
msg_size = ffi.sizeof(msg[0])
new_msg = ffi.cast(ffi.typeof(msg), lib.malloc(msg_size))
ffi.memmove(new_msg, msg, msg_size)
if msg_type == 'msg_appendentries_t *':
size_of_entries = ffi.sizeof(ffi.getctype('msg_entry_t')) * new_msg.n_entries
new_msg.entries = ffi.cast('msg_entry_t*', lib.malloc(size_of_entries))
ffi.memmove(new_msg.entries, msg.entries, size_of_entries)
self.messages.append(Message(new_msg, sendor, sendee))
def poll_message(self, msg):
msg_type = ffi.getctype(ffi.typeof(msg.data))
if msg_type == 'msg_appendentries_t *':
node = lib.raft_get_node(msg.sendee.raft, msg.sendor.id)
response = ffi.new('msg_appendentries_response_t*')
e = lib.raft_recv_appendentries(msg.sendee.raft, node, msg.data, response)
if lib.RAFT_ERR_SHUTDOWN == e:
logging.error('Catastrophic')
print(msg.sendee.debug_log())
print(msg.sendor.debug_log())
sys.exit(1)
elif lib.RAFT_ERR_NEEDS_SNAPSHOT == e:
pass # TODO: pretend as if snapshot works
else:
self.enqueue_msg(response, msg.sendee, msg.sendor)
elif msg_type == 'msg_appendentries_response_t *':
node = lib.raft_get_node(msg.sendee.raft, msg.sendor.id)
lib.raft_recv_appendentries_response(msg.sendee.raft, node, msg.data)
elif msg_type == 'msg_requestvote_t *':
response = ffi.new('msg_requestvote_response_t*')
node = lib.raft_get_node(msg.sendee.raft, msg.sendor.id)
lib.raft_recv_requestvote(msg.sendee.raft, node, msg.data, response)
self.enqueue_msg(response, msg.sendee, msg.sendor)
elif msg_type == 'msg_requestvote_response_t *':
node = lib.raft_get_node(msg.sendee.raft, msg.sendor.id)
e = lib.raft_recv_requestvote_response(msg.sendee.raft, node, msg.data)
if lib.RAFT_ERR_SHUTDOWN == e:
msg.sendor.shutdown()
else:
assert False
def poll_messages(self):
msgs = self.messages
# Chaos: re-ordered messages
# self.random.shuffle(msgs)
self.messages = []
for msg in msgs:
self.poll_message(msg)
for server in self.active_servers:
if hasattr(server, 'abort_exception'):
raise server.abort_exception
self._check_current_idx_validity(server)
self._check_election_safety()
def _check_current_idx_validity(self, server):
"""
Check that current idx is valid, ie. it exists
"""
ci = lib.raft_get_current_idx(server.raft)
if 0 < ci and not lib.raft_get_snapshot_last_idx(server.raft) == ci:
ety = lib.raft_get_entry_from_idx(server.raft, ci)
try:
assert ety
except Exception:
print('current idx ', ci)
print('count', lib.raft_get_log_count(server.raft))
print('last snapshot', lib.raft_get_snapshot_last_idx(server.raft))
print(server.debug_log())
raise
def _check_election_safety(self):
"""
FIXME: this is O(n^2)
Election Safety
At most one leader can be elected in a given term.
"""
for i, sv1 in enumerate(net.active_servers):
if not lib.raft_is_leader(sv1.raft):
continue
for sv2 in net.active_servers[i + 1:]:
term1 = lib.raft_get_current_term(sv1.raft)
term2 = lib.raft_get_current_term(sv2.raft)
if lib.raft_is_leader(sv2.raft) and term1 == term2:
logging.error("election safety invalidated")
print(sv1, sv2, term1)
print('partitions:', self.partitions)
sys.exit(1)
def commit_static_configuration(self):
for server in net.active_servers:
server.connection_status = NODE_CONNECTED
for sv in net.active_servers:
is_self = 1 if sv.id == server.id else 0
node = lib.raft_add_node(server.raft, sv.udata, sv.id, is_self)
# FIXME: it's a bit much to expect to set these too
lib.raft_node_set_voting_committed(node, 1)
lib.raft_node_set_addition_committed(node, 1)
lib.raft_node_set_active(node, 1)
def prep_dynamic_configuration(self):
"""
Add configuration change for leader's node
"""
server = self.active_servers[0]
self.leader = server
server.set_connection_status(NODE_CONNECTED)
lib.raft_add_non_voting_node(server.raft, server.udata, server.id, 1)
lib.raft_become_leader(server.raft)
# Configuration change entry to bootstrap other nodes
ety = ffi.new('msg_entry_t*')
ety.term = 0
ety.type = lib.RAFT_LOGTYPE_ADD_NODE
ety.id = self.new_entry_id()
change = ffi.new_handle(ChangeRaftEntry(server.id))
ety.data.buf = change
ety.data.len = ffi.sizeof(ety.data.buf)
self.entries.append((ety, change))
e = server.recv_entry(ety)
assert e == 0
lib.raft_set_commit_idx(server.raft, 1)
e = lib.raft_apply_all(server.raft)
assert e == 0
def new_entry_id(self):
self.ety_id += 1
return self.ety_id
def remove_server(self, server):
server.removed = True
# self.servers = [s for s in self.servers if s is not server]
@property
def active_servers(self):
return [s for s in self.servers if not getattr(s, 'removed', False)]
def add_member(self):
if net.num_of_servers <= len(self.active_servers):
return
if not self.leader:
logging.error('no leader')
return
leader = self.leader
if not lib.raft_is_leader(leader.raft):
return
if lib.raft_voting_change_is_in_progress(leader.raft):
# logging.error('{} voting change in progress'.format(server))
return
server = RaftServer(self)
# Create a new configuration entry to be processed by the leader
ety = ffi.new('msg_entry_t*')
ety.term = 0
ety.id = self.new_entry_id()
ety.type = lib.RAFT_LOGTYPE_ADD_NONVOTING_NODE
change = ffi.new_handle(ChangeRaftEntry(server.id))
ety.data.buf = change
ety.data.len = ffi.sizeof(ety.data.buf)
assert(lib.raft_entry_is_cfg_change(ety))
self.entries.append((ety, change))
e = leader.recv_entry(ety)
if 0 != e:
logging.error(err2str(e))
return
else:
self.num_membership_changes += 1
# Wake up new node
server.set_connection_status(NODE_CONNECTING)
assert server.udata
added_node = lib.raft_add_non_voting_node(server.raft, server.udata, server.id, 1)
assert added_node
def remove_member(self):
if not self.leader:
logging.error('no leader')
return
leader = self.leader
server = self.random.choice(self.active_servers)
if not lib.raft_is_leader(leader.raft):
return
if lib.raft_voting_change_is_in_progress(leader.raft):
# logging.error('{} voting change in progress'.format(server))
return
if leader == server:
# logging.error('can not remove leader')
return
if server.connection_status in [NODE_CONNECTING, NODE_DISCONNECTING]:
# logging.error('can not remove server that is changing connection status')
return
if NODE_DISCONNECTED == server.connection_status:
self.remove_server(server)
return
# Create a new configuration entry to be processed by the leader
ety = ffi.new('msg_entry_t*')
ety.term = 0
ety.id = self.new_entry_id()
assert server.connection_status == NODE_CONNECTED
ety.type = lib.RAFT_LOGTYPE_DEMOTE_NODE
change = ffi.new_handle(ChangeRaftEntry(server.id))
ety.data.buf = change
ety.data.len = ffi.sizeof(ety.data.buf)
assert(lib.raft_entry_is_cfg_change(ety))
self.entries.append((ety, change))
e = leader.recv_entry(ety)
if 0 != e:
logging.error(err2str(e))
return
else:
self.num_membership_changes += 1
# Wake up new node
assert NODE_CONNECTED == server.connection_status
server.set_connection_status(NODE_DISCONNECTING)
def diagnotistic_info(self):
print()
info = {
"Maximum appendentries size": self.max_entries_in_ae,
"Leadership changes": self.leadership_changes,
"Log pops": self.log_pops,
"Unique nodes": self.num_unique_nodes,
"Membership changes": self.num_membership_changes,
"Compactions": self.num_compactions,
}
for k, v in info.items():
print(k, v)
print()
def abbreviate(k):
return re.sub(r'([a-z])[a-z]*_', r'\1', k)
# Servers
keys = sorted(net.servers[0].debug_statistics().keys())
print(keys)
data = [list(map(abbreviate, keys))] + [
[s.debug_statistics()[key] for key in keys]
for s in net.servers
]
table = terminaltables.AsciiTable(data)
print(table.table)
class RaftServer(object):
def __init__(self, network):
self.connection_status = NODE_DISCONNECTED
self.raft = lib.raft_new()
self.udata = ffi.new_handle(self)
network.add_server(self)
self.load_callbacks()
cbs = ffi.new('raft_cbs_t*')
cbs.send_requestvote = self.raft_send_requestvote
cbs.send_appendentries = self.raft_send_appendentries
cbs.send_snapshot = self.raft_send_snapshot
cbs.applylog = self.raft_applylog
cbs.persist_vote = self.raft_persist_vote
cbs.persist_term = self.raft_persist_term
cbs.log_offer = self.raft_logentry_offer
cbs.log_poll = self.raft_logentry_poll
cbs.log_pop = self.raft_logentry_pop
cbs.log_get_node_id = self.raft_logentry_get_node_id
cbs.node_has_sufficient_logs = self.raft_node_has_sufficient_logs
cbs.notify_membership_event = self.raft_notify_membership_event
cbs.log = self.raft_log
lib.raft_set_callbacks(self.raft, cbs, self.udata)
lib.raft_set_election_timeout(self.raft, 500)
self.fsm_dict = {}
self.fsm_log = []
def __str__(self):
return '<Server: {0}>'.format(self.id)
def __repr__(self):
return 'sv:{0}'.format(self.id)
def set_connection_status(self, new_status):
assert(not (self.connection_status == NODE_CONNECTED and new_status == NODE_CONNECTING))
# logging.warning('{}: {} -> {}'.format(
# self,
# connectstatus2str(self.connection_status),
# connectstatus2str(new_status)))
self.connection_status = new_status
def debug_log(self):
first_idx = lib.raft_get_snapshot_last_idx(self.raft)
return [(i + first_idx, l.term, l.id) for i, l in enumerate(self.fsm_log)]
def do_compaction(self):
# logging.warning('{} snapshotting'.format(self))
# entries_before = lib.raft_get_log_count(self.raft)
e = lib.raft_begin_snapshot(self.raft)
if e != 0:
return
assert(lib.raft_snapshot_is_in_progress(self.raft))
e = lib.raft_end_snapshot(self.raft)
assert(e == 0)
if e != 0:
return
self.do_membership_snapshot()
self.snapshot.image = dict(self.fsm_dict)
self.snapshot.last_term = lib.raft_get_snapshot_last_term(self.raft)
self.snapshot.last_idx = lib.raft_get_snapshot_last_idx(self.raft)
self.network.num_compactions += 1
# logging.warning('{} entries compacted {}'.format(
# self,
# entries_before - lib.raft_get_log_count(self.raft)
# ))
def periodic(self, msec):
if self.network.random.randint(1, 100000) < self.network.compaction_rate:
self.do_compaction()
e = lib.raft_periodic(self.raft, msec)
if lib.RAFT_ERR_SHUTDOWN == e:
self.shutdown()
# e = lib.raft_apply_all(self.raft)
# if lib.RAFT_ERR_SHUTDOWN == e:
# self.shutdown()
# return
if hasattr(self, 'abort_exception'):
raise self.abort_exception
def shutdown(self):
# logging.error('{} shutting down'.format(self))
self.set_connection_status(NODE_DISCONNECTED)
self.network.remove_server(self)
def set_network(self, network):
self.network = network
def load_callbacks(self):
self.raft_send_requestvote = ffi.callback("int(raft_server_t*, void*, raft_node_t*, msg_requestvote_t*)", raft_send_requestvote)
self.raft_send_appendentries = ffi.callback("int(raft_server_t*, void*, raft_node_t*, msg_appendentries_t*)", raft_send_appendentries)
self.raft_send_snapshot = ffi.callback("int(raft_server_t*, void* , raft_node_t*)", raft_send_snapshot)
self.raft_applylog = ffi.callback("int(raft_server_t*, void*, raft_entry_t*, raft_index_t)", raft_applylog)
self.raft_persist_vote = ffi.callback("int(raft_server_t*, void*, raft_node_id_t)", raft_persist_vote)
self.raft_persist_term = ffi.callback("int(raft_server_t*, void*, raft_term_t, raft_node_id_t)", raft_persist_term)
self.raft_logentry_offer = ffi.callback("int(raft_server_t*, void*, raft_entry_t*, raft_index_t)", raft_logentry_offer)
self.raft_logentry_poll = ffi.callback("int(raft_server_t*, void*, raft_entry_t*, raft_index_t)", raft_logentry_poll)
self.raft_logentry_pop = ffi.callback("int(raft_server_t*, void*, raft_entry_t*, raft_index_t)", raft_logentry_pop)
self.raft_logentry_get_node_id = ffi.callback("int(raft_server_t*, void*, raft_entry_t*, raft_index_t)", raft_logentry_get_node_id)
self.raft_node_has_sufficient_logs = ffi.callback("int(raft_server_t* raft, void *user_data, raft_node_t* node)", raft_node_has_sufficient_logs)
self.raft_notify_membership_event = ffi.callback("void(raft_server_t* raft, void *user_data, raft_node_t* node, raft_membership_e)", raft_notify_membership_event)
self.raft_log = ffi.callback("void(raft_server_t*, raft_node_t*, void*, const char* buf)", raft_log)
def recv_entry(self, ety):
# FIXME: leak
response = ffi.new('msg_entry_response_t*')
return lib.raft_recv_entry(self.raft, ety, response)
def get_entry(self, idx):
idx = idx - lib.raft_get_snapshot_last_idx(self.raft)
if idx < 0:
raise IndexError
try:
return self.fsm_log[idx]
except:
# self.abort_exception = e
raise
def _check_log_matching(self, our_log, idx):
"""
Quality:
Log Matching: if two logs contain an entry with the same index and
term, then the logs are identical in all entries up through the given
index. §5.3
State Machine Safety: if a server has applied a log entry at a given
index to its state machine, no other server will ever apply a
different log entry for the same index. §5.4.3
"""
for server in self.network.active_servers:
if server is self:
continue
their_commit_idx = lib.raft_get_commit_idx(server.raft)
if lib.raft_get_commit_idx(self.raft) <= their_commit_idx and idx <= their_commit_idx:
their_log = lib.raft_get_entry_from_idx(server.raft, idx)
if their_log == ffi.NULL:
assert idx < lib.raft_get_snapshot_last_idx(self.raft)
if their_log.type in [lib.RAFT_LOGTYPE_NORMAL]:
try:
assert their_log.term == our_log.term
assert their_log.id == our_log.id
except Exception as e:
ety1 = lib.raft_get_entry_from_idx(self.raft, idx)
ety2 = lib.raft_get_entry_from_idx(server.raft, idx)
logging.error('ids: %s %s', ety1.id, ety2.id)
logging.error('{0}vs{1} idx:{2} terms:{3} {4} ids:{5} {6}'.format(
self, server,
idx,
our_log.term, their_log.term,
our_log.id, their_log.id))
self.abort_exception = e
logging.error(self.debug_log())
logging.error(server.debug_log())
return lib.RAFT_ERR_SHUTDOWN
def entry_apply(self, ety, idx):
# collect stats
if self.network.latest_applied_log_idx < idx:
self.network.latest_applied_log_idx = idx
self.network.latest_applied_log_iteration = self.network.iteration
e = self._check_log_matching(ety, idx)
if e is not None:
return e
change = ffi.from_handle(ety.data.buf)
if ety.type == lib.RAFT_LOGTYPE_NORMAL:
self.fsm_dict[change.key] = change.val
elif ety.type == lib.RAFT_LOGTYPE_DEMOTE_NODE:
if change.node_id == lib.raft_get_nodeid(self.raft):
# logging.warning("{} shutting down because of demotion".format(self))
return lib.RAFT_ERR_SHUTDOWN
# Follow up by removing the node by receiving new entry
elif lib.raft_is_leader(self.raft):
new_ety = ffi.new('msg_entry_t*')
new_ety.term = 0
new_ety.id = self.network.new_entry_id()
new_ety.type = lib.RAFT_LOGTYPE_REMOVE_NODE
new_ety.data.buf = ety.data.buf
new_ety.data.len = ffi.sizeof(ety.data.buf)
assert(lib.raft_entry_is_cfg_change(new_ety))
e = self.recv_entry(new_ety)
assert e == 0
elif ety.type == lib.RAFT_LOGTYPE_REMOVE_NODE:
if change.node_id == lib.raft_get_nodeid(self.raft):
# logging.warning("{} shutting down because of removal".format(self))
return lib.RAFT_ERR_SHUTDOWN
elif ety.type == lib.RAFT_LOGTYPE_ADD_NODE:
if change.node_id == self.id:
self.set_connection_status(NODE_CONNECTED)
elif ety.type == lib.RAFT_LOGTYPE_ADD_NONVOTING_NODE:
pass
return 0
def do_membership_snapshot(self):
self.snapshot = Snapshot()
for i in range(0, lib.raft_get_num_nodes(self.raft)):
n = lib.raft_get_node_from_idx(self.raft, i)
id = lib.raft_node_get_id(n)
if 0 == lib.raft_node_is_addition_committed(n):
id = -1
self.snapshot.members.append(
SnapshotMember(id, lib.raft_node_is_voting_committed(n)))
def load_snapshot(self, snapshot, other):
# logging.warning('{} loading snapshot'.format(self))
e = lib.raft_begin_load_snapshot(
self.raft,
snapshot.last_term,
snapshot.last_idx,
)
if e == -1:
return 0
elif e == lib.RAFT_ERR_SNAPSHOT_ALREADY_LOADED:
return 0
elif e == 0:
pass
else:
assert False
# Send appendentries response for this snapshot
response = ffi.new('msg_appendentries_response_t*')
response.success = 1
response.current_idx = snapshot.last_idx
response.term = lib.raft_get_current_term(self.raft)
response.first_idx = response.current_idx
self.network.enqueue_msg(response, self, other)
node_id = lib.raft_get_nodeid(self.raft)
# set membership configuration according to snapshot
for member in snapshot.members:
if -1 == member.id:
continue
node = lib.raft_get_node(self.raft, member.id)
if not node:
udata = ffi.NULL
try:
node_sv = self.network.id2server(member.id)
udata = node_sv.udata
except ServerDoesNotExist:
pass
node = lib.raft_add_node(self.raft, udata, member.id, member.id == node_id)
lib.raft_node_set_active(node, 1)
if member.voting and not lib.raft_node_is_voting(node):
lib.raft_node_set_voting(node, 1)
elif not member.voting and lib.raft_node_is_voting(node):
lib.raft_node_set_voting(node, 0)
if node_id != member.id:
assert node
# TODO: this is quite ugly
# we should have a function that removes all nodes by ourself
# if (!raft_get_my_node(self->raft)) */
# raft_add_non_voting_node(self->raft, NULL, node_id, 1); */
e = lib.raft_end_load_snapshot(self.raft)
assert(0 == e)
assert(lib.raft_get_log_count(self.raft) == 0)
self.do_membership_snapshot()
self.snapshot.image = dict(snapshot.image)
self.snapshot.last_term = snapshot.last_term
self.snapshot.last_idx = snapshot.last_idx
assert(lib.raft_get_my_node(self.raft))
# assert(sv->snapshot_fsm);
self.fsm_dict = dict(snapshot.image)
logging.warning('{} loaded snapshot t:{} idx:{}'.format(
self, snapshot.last_term, snapshot.last_idx))
def send_snapshot(self, node):
assert not lib.raft_snapshot_is_in_progress(self.raft)
# FIXME: Why would this happen?
if not hasattr(self, 'snapshot'):
return 0
node_sv = ffi.from_handle(lib.raft_node_get_udata(node))
# TODO: Why would this happen?
# seems odd that we would send something to a node that didn't exist
if not node_sv:
return 0
# NOTE:
# In a real server we would have to send the snapshot file to the
# other node. Here we have the convenience of the transfer being
# "immediate".
node_sv.load_snapshot(self.snapshot, self)
return 0
def persist_vote(self, voted_for):
# TODO: add disk simulation
return 0
def persist_term(self, term, vote):
# TODO: add disk simulation
return 0
def _check_id_monoticity(self, ety):
"""
Check last entry has smaller ID than new entry.
This is a virtraft specific check to make sure entry passing is
working correctly.
"""
ci = lib.raft_get_current_idx(self.raft)
if 0 < ci and not lib.raft_get_snapshot_last_idx(self.raft) == ci:
try:
prev_ety = lib.raft_get_entry_from_idx(self.raft, ci)
assert prev_ety
other_id = prev_ety.id
assert other_id < ety.id
except Exception as e:
logging.error('%s %s', other_id, ety.id)
self.abort_exception = e
raise
def entry_append(self, ety, ety_idx):
try:
assert not self.fsm_log or self.fsm_log[-1].term <= ety.term
except Exception as e:
self.abort_exception = e
# FIXME: consider returning RAFT_ERR_SHUTDOWN
raise
self._check_id_monoticity(ety)
self.fsm_log.append(RaftEntry(ety))
return 0
def entry_poll(self, ety, ety_idx):
self.fsm_log.pop(0)
return 0
def _check_committed_entry_popping(self, ety_idx):
"""
Check we aren't popping a committed entry
"""
try:
assert lib.raft_get_commit_idx(self.raft) < ety_idx
except Exception as e:
self.abort_exception = e
return lib.RAFT_ERR_SHUTDOWN
return 0
def entry_pop(self, ety, ety_idx):
# logging.warning("POP {} {}".format(self, ety_idx))
e = self._check_committed_entry_popping(ety_idx)
if e != 0:
return e
self.fsm_log.pop()
self.network.log_pops += 1
change = ffi.from_handle(ety.data.buf)
if ety.type == lib.RAFT_LOGTYPE_DEMOTE_NODE:
pass
elif ety.type == lib.RAFT_LOGTYPE_REMOVE_NODE:
if change.node_id == lib.raft_get_nodeid(self.raft):
self.set_connection_status(NODE_CONNECTED)
elif ety.type == lib.RAFT_LOGTYPE_ADD_NONVOTING_NODE:
if change.node_id == lib.raft_get_nodeid(self.raft):
logging.error("POP disconnect {} {}".format(self, ety_idx))
self.set_connection_status(NODE_DISCONNECTED)
elif ety.type == lib.RAFT_LOGTYPE_ADD_NODE:
if change.node_id == lib.raft_get_nodeid(self.raft):
self.set_connection_status(NODE_CONNECTING)
return 0
def node_has_sufficient_entries(self, node):
assert(not lib.raft_node_is_voting(node))
ety = ffi.new('msg_entry_t*')
ety.term = 0
ety.id = self.network.new_entry_id()
ety.type = lib.RAFT_LOGTYPE_ADD_NODE
change = ffi.new_handle(ChangeRaftEntry(lib.raft_node_get_id(node)))
ety.data.buf = change
ety.data.len = ffi.sizeof(ety.data.buf)
self.network.entries.append((ety, change))
assert(lib.raft_entry_is_cfg_change(ety))
# FIXME: leak
e = self.recv_entry(ety)
# print(err2str(e))
assert e == 0
return 0
def notify_membership_event(self, node, event_type):
# Convenience: Ensure that added node has udata set
if event_type == lib.RAFT_MEMBERSHIP_ADD:
node_id = lib.raft_node_get_id(node)
try:
server = self.network.id2server(node_id)
except ServerDoesNotExist:
pass
else:
node = lib.raft_get_node(self.raft, node_id)
lib.raft_node_set_udata(node, server.udata)
def debug_statistics(self):
return {
"node": lib.raft_get_nodeid(self.raft),
"state": state2str(lib.raft_get_state(self.raft)),
"current_idx": lib.raft_get_current_idx(self.raft),
"last_log_term": lib.raft_get_last_log_term(self.raft),
"current_term": lib.raft_get_current_term(self.raft),
"commit_idx": lib.raft_get_commit_idx(self.raft),
"last_applied_idx": lib.raft_get_last_applied_idx(self.raft),
"log_count": lib.raft_get_log_count(self.raft),
"peers": lib.raft_get_num_nodes(self.raft),
"voting_peers": lib.raft_get_num_voting_nodes(self.raft),
"connection_status": connectstatus2str(self.connection_status),
"voting_change_in_progress": lib.raft_voting_change_is_in_progress(self.raft),
"removed": getattr(self, 'removed', False),
}
if __name__ == '__main__':
try:
args = docopt.docopt(__doc__, version='virtraft 0.1')
except docopt.DocoptExit as e:
print(e)
sys.exit()
if args['--verbose'] or args['--log_level']:
coloredlogs.install(fmt='%(asctime)s %(levelname)s %(message)s')
level = logging.DEBUG
if args['--log_level']:
level = int(args['--log_level'])
logging.basicConfig(level=level, format='%(asctime)s %(message)s')
net = Network(int(args['--seed']))
net.dupe_rate = int(args['--dupe_rate'])
net.drop_rate = int(args['--drop_rate'])
net.client_rate = int(args['--client_rate'])
net.member_rate = int(args['--member_rate'])
net.compaction_rate = int(args['--compaction_rate'])
net.partition_rate = int(args['--partition_rate'])
net.no_random_period = 1 == int(args['--no_random_period'])
net.num_of_servers = int(args['--servers'])
if net.member_rate == 0:
for i in range(0, int(args['--servers'])):
RaftServer(net)
net.commit_static_configuration()
else:
RaftServer(net)
net.prep_dynamic_configuration()
for i in range(0, int(args['--iterations'])):
net.iteration += 1
try:
net.periodic()
net.poll_messages()
except:
# for server in net.servers:
# print(server, [l.term for l in server.fsm_log])
raise
net.diagnotistic_info()