ruanhaishen / RedisLabs Raft / Commits / 9f348d65

Unverified commit 9f348d65, authored Feb 07, 2022 by Ozan Tezcan, committed by GitHub on Feb 07, 2022

Add support for async disk flush, backpressure and batching (#81)

parent 39f24a05
Changes: 9 files
Makefile

@@ -125,6 +125,8 @@ test_virtraft: $(RAFT_CFFI_TARGET)
 	python3 tests/virtraft2.py --servers 5 -i 20000 --compaction_rate 50 --drop_rate 5 -P 10 --seed 5 -m 3 --client_rate 0 $(VIRTRAFT_OPTS)
 	python3 tests/virtraft2.py --servers 5 -i 20000 --compaction_rate 50 --drop_rate 5 -P 10 --seed 6 -m 3 $(VIRTRAFT_OPTS)
 	python3 tests/virtraft2.py --servers 5 -i 20000 --compaction_rate 50 --drop_rate 5 -P 10 --seed 6 -m 3 --client_rate 0 $(VIRTRAFT_OPTS)
+	python3 tests/virtraft2.py --servers 5 -i 20000 --compaction_rate 50 --drop_rate 5 -P 10 --seed 1 -m 3 --auto_flush $(VIRTRAFT_OPTS)
+	python3 tests/virtraft2.py --servers 5 -i 20000 --compaction_rate 50 --drop_rate 5 -P 10 --seed 6 -m 3 --auto_flush $(VIRTRAFT_OPTS)

 .PHONY: amalgamation
 amalgamation:
include/raft.h

@@ -603,6 +603,38 @@ typedef int (
 raft_node_t* node);

+/** Callback for skipping the sending of msg_appendentries to a node
+ *
+ * Implementing this callback is optional.
+ *
+ * If there are already pending appendentries messages in flight, you may want
+ * to skip sending more until you receive responses for the previous ones.
+ * If the node is a slow consumer and you create a msg_appendentries for each
+ * batch of new entries received, it may cause an out-of-memory condition.
+ *
+ * This callback also enables batching. If new entries arrive at an interval,
+ * creating a new appendentries message for each one might be inefficient:
+ * for each appendentries message, the follower has to write the entries to
+ * disk before sending its response. e.g. If there are 1000 appendentries
+ * messages in flight, committing a new entry requires the previous 1000 disk
+ * write operations to complete first. Considering that disk writes are quite
+ * slow, 1000 of them will take quite some time. A better approach is to limit
+ * the number of in-flight appendentries messages depending on network
+ * conditions and disk performance.
+ *
+ * @param[in] raft The Raft server making this callback
+ * @param[in] node The node that we are about to send msg_appendentries to
+ * @return 0 to send the message
+ *         any other value to skip sending the message
+ */
+typedef int (*func_backpressure_f)(raft_server_t* raft, void *user_data, raft_node_t* node);
+
 typedef struct
 {
     /** Callback for sending request vote messages */
@@ -665,6 +697,9 @@ typedef struct
     /** Callback for sending TimeoutNow RPC messages to nodes */
     func_send_timeoutnow_f send_timeoutnow;

+    /** Callback for deciding whether to send msg_appendentries to a node. */
+    func_backpressure_f backpressure;
 } raft_cbs_t;

 /** A generic notification callback used to allow Raft to notify caller
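As an illustration of how this callback might be used (a hedged sketch, not part of this commit), an application could cap the number of in-flight appendentries per follower; struct app_state, its inflight array and MAX_INFLIGHT below are hypothetical application-side names.

#include "raft.h"

#define MAX_INFLIGHT 64                 /* hypothetical application-chosen cap */

struct app_state {
    int inflight[64];                   /* toy bookkeeping: in-flight appendentries per node id */
};

static int app_backpressure(raft_server_t *raft, void *user_data, raft_node_t *node)
{
    struct app_state *app = user_data;
    int pending = app->inflight[raft_node_get_id(node)];

    /* Return 0 to let the library send the message; any other value skips
     * this node for now, and it will be retried on a later send attempt. */
    return pending >= MAX_INFLIGHT ? 1 : 0;
}

The callback is wired up through the new raft_cbs_t field (".backpressure = app_backpressure"), the same way the test change further down registers its callback.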
@@ -843,6 +878,13 @@ typedef struct raft_log_impl
      * Number of entries.
      */
     raft_index_t (*count)(void *log);

+    /** Persist the log file to disk, usually implemented by calling fsync()
+     * on the log file.
+     * @return 0 on success
+     *        -1 on error
+     */
+    int (*sync)(void *log);
 } raft_log_impl_t;

 /** Initialise a new Raft server, using the in-memory log implementation.
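For a disk-backed log implementation, the new sync() member would typically wrap fsync(). A minimal sketch under that assumption (struct app_log and its fd field are hypothetical, not part of this commit):

#include <unistd.h>

struct app_log {
    int fd;                             /* file descriptor of the on-disk log (hypothetical) */
};

static int app_log_sync(void *log)
{
    struct app_log *l = log;

    /* Block until everything written to the log so far is durable. */
    return fsync(l->fd) == 0 ? 0 : -1;
}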
@@ -1474,7 +1516,7 @@ extern const raft_log_impl_t raft_log_internal_impl;
 void raft_handle_append_cfg_change(raft_server_t* me_, raft_entry_t *ety, raft_index_t idx);

-void raft_queue_read_request(raft_server_t* me_, func_read_request_callback_f cb, void *cb_arg);
+int raft_queue_read_request(raft_server_t* me_, func_read_request_callback_f cb, void *cb_arg);

 /** Attempt to process read queue.
  */
@@ -1495,4 +1537,71 @@ void raft_set_timeout_now(raft_server_t* me_);
 raft_index_t raft_get_num_snapshottable_logs(raft_server_t* me_);

+/** Disable or enable auto flush mode. Auto flush is enabled by default.
+ *
+ * In auto flush mode, after each raft_recv_entry() call, raft_log_impl_t's
+ * sync() is called to verify that the entry is persisted, and appendentries
+ * messages for the entry are sent immediately. The library is easy to use in
+ * this mode, but better performance requires batching: entries can be written
+ * to disk in another thread, and a single appendentries message can carry
+ * multiple entries. To do that, disable auto flush mode. Once it is disabled,
+ * the library user must check the newest log index by calling
+ * raft_get_index_to_sync() and verify that new entries up to that index are
+ * written to the disk, probably in another thread. Also, users should call
+ * raft_flush() often to update the persisted log index and to send new
+ * appendentries messages.
+ *
+ * Example:
+ *
+ * void server_loop() {
+ *     while (1) {
+ *         HandleNetworkOperations();
+ *
+ *         for (int i = 0; i < new_readreq_count; i++)
+ *             raft_queue_read_request(raft, read_requests[i]);
+ *
+ *         for (int i = 0; i < new_requests_count; i++)
+ *             raft_recv_entry(raft, new_requests[i]);
+ *
+ *         raft_index_t current_idx = raft_get_index_to_sync(raft);
+ *         if (current_idx != 0) {
+ *             TriggerAsyncWriteForIndex(current_idx);
+ *         }
+ *
+ *         raft_index_t sync_index = GetLastCompletedSyncIndex();
+ *
+ *         // This call will send new appendentries messages if necessary
+ *         raft_flush(sync_index);
+ *     }
+ * }
+ *
+ * raft_flush() is a no-op if the node is a follower.
+ *
+ * @param[in] raft The Raft server
+ * @param[in] flush 1 to enable, 0 to disable
+ * @return 0 on success
+ */
+int raft_set_auto_flush(raft_server_t* me, int flush);
+
+/** Returns the latest entry index that needs to be written to the disk.
+ *
+ * This function is only useful when auto flush is disabled. The same index is
+ * reported only once.
+ *
+ * @param[in] raft The Raft server
+ * @return the entry index that needs to be written to the disk,
+ *         '0' if there is no new entry to write to the disk
+ */
+raft_index_t raft_get_index_to_sync(raft_server_t* me);
+
+/** Update the persisted index and send messages (e.g. appendentries) to the
+ * followers.
+ *
+ * raft_flush() is a no-op if the node is a follower.
+ *
+ * @param[in] raft The Raft server
+ * @param[in] sync_index Entry index of the last persisted entry. '0' to skip
+ *                       updating the persisted index.
+ * @return 0 on success
+ */
+int raft_flush(raft_server_t* me, raft_index_t sync_index);
+
 #endif /* RAFT_H_ */
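The server_loop() example above leaves TriggerAsyncWriteForIndex() and GetLastCompletedSyncIndex() undefined. One possible shape for them (a hedged sketch, not part of the library or this commit; the globals, the log_fd argument and the use of long in place of raft_index_t are assumptions) is a background thread that fsyncs the on-disk log and publishes the last durable index, which the main loop then feeds to raft_flush():

#include <pthread.h>
#include <stdatomic.h>
#include <unistd.h>

static atomic_long requested_idx;       /* highest index the main loop asked to be synced */
static atomic_long completed_idx;       /* highest index known to be durable on disk */

void TriggerAsyncWriteForIndex(long idx)
{
    atomic_store(&requested_idx, idx);  /* picked up by sync_thread() */
}

long GetLastCompletedSyncIndex(void)
{
    return atomic_load(&completed_idx);
}

/* Started once, e.g. with pthread_create(&tid, NULL, sync_thread, &log_fd). */
static void *sync_thread(void *arg)
{
    int log_fd = *(int *) arg;          /* fd of the application's on-disk log */

    for (;;) {
        long target = atomic_load(&requested_idx);
        if (target > atomic_load(&completed_idx)) {
            fsync(log_fd);              /* entries up to 'target' are now durable */
            atomic_store(&completed_idx, target);
        }
        usleep(1000);                   /* avoid busy-spinning */
    }

    return NULL;
}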
include/raft_private.h

@@ -120,10 +120,25 @@ typedef struct {
     raft_read_request_t *read_queue_head;
     raft_read_request_t *read_queue_tail;

+    /* Do we need a quorum round? e.g. The leader received a read request and
+     * needs a quorum round before processing it. */
+    int need_quorum_round;
+
     raft_node_id_t node_transferring_leader_to; // the node we are targeting for leadership
     long transfer_leader_time; // how long we should wait for leadership transfer to take, before aborting
     int sent_timeout_now; // if we've already sent a leadership transfer signal

+    /* If this config is off (equals zero), the user must call raft_flush()
+     * manually. It will trigger sending appendreqs, applying entries etc.
+     * Useful for batching, e.g. after many raft_recv_entry() calls,
+     * one raft_flush() call will trigger sending appendreqs for the latest
+     * entries. */
+    int auto_flush;
+
+    /* Index of the log entry that needs to be written to the disk. Only
+     * useful when auto flush is disabled. */
+    raft_index_t next_sync_index;
+
     int timeout_now;
 } raft_server_private_t;
src/raft_log.c

@@ -424,6 +424,11 @@ static raft_index_t __log_count(void *log)
     return log_count(log);
 }

+static int __log_sync(void *log)
+{
+    return 0;
+}
+
 const raft_log_impl_t raft_log_internal_impl = {
     .init = __log_init,
     .free = __log_free,

@@ -435,5 +440,6 @@ const raft_log_impl_t raft_log_internal_impl = {
     .get_batch = __log_get_batch,
     .first_idx = __log_first_idx,
     .current_idx = __log_current_idx,
-    .count = __log_count
+    .count = __log_count,
+    .sync = __log_sync
 };
src/raft_server.c

@@ -118,6 +118,7 @@ raft_server_t* raft_new_with_log(const raft_log_impl_t *log_impl, void *log_arg)
     me->request_timeout = 200;
     me->election_timeout = 1000;
     me->node_transferring_leader_to = RAFT_NODE_ID_NONE;
+    me->auto_flush = 1;

     raft_update_quorum_meta((raft_server_t*) me, me->msg_id);
@@ -367,8 +368,12 @@ int raft_delete_entry_from_idx(raft_server_t* me_, raft_index_t idx)
     if (idx <= me->voting_cfg_change_log_idx)
         me->voting_cfg_change_log_idx = -1;

-    return me->log_impl->pop(me->log, idx,
-                             (func_entry_notify_f) raft_handle_remove_cfg_change, me_);
+    int e = me->log_impl->pop(me->log, idx,
+                              (func_entry_notify_f) raft_handle_remove_cfg_change, me_);
+    if (e != 0)
+        return e;
+
+    return me->log_impl->sync(me->log);
 }

 int raft_election_start(raft_server_t* me_)
@@ -422,6 +427,13 @@ int raft_become_leader(raft_server_t* me_)
     if (0 != e)
         return e;

+    e = me->log_impl->sync(me->log);
+    if (0 != e)
+        return e;
+
+    raft_node_set_match_idx(me->node, raft_get_current_idx(me_));
+    me->next_sync_index = raft_get_current_idx(me_) + 1;
+
     // Commit noop immediately if this is a single node cluster
     if (raft_is_single_node_voting_cluster(me_)) {
         raft_set_commit_idx(me_, raft_get_current_idx(me_));
@@ -601,6 +613,7 @@ int raft_periodic(raft_server_t* me_, int msec_since_last_period)
     if (me->request_timeout <= me->timeout_elapsed)
     {
         me->msg_id++;
+        me->timeout_elapsed = 0;
         raft_send_appendentries_all(me_);
     }
@@ -751,37 +764,8 @@ int raft_recv_appendentries_response(raft_server_t* me_,
     raft_node_set_next_idx(node, r->current_idx + 1);
     raft_node_set_match_idx(node, r->current_idx);

-    /* Update commit idx */
-    raft_index_t point = r->current_idx;
-    if (point)
-    {
-        raft_entry_t* ety = raft_get_entry_from_idx(me_, point);
-        if (raft_get_commit_idx(me_) < point && ety->term == me->current_term)
-        {
-            int votes = raft_node_is_voting(me->node) ? 1 : 0;
-            for (int i = 0; i < me->num_nodes; i++)
-            {
-                raft_node_t* follower = me->nodes[i];
-                if (me->node != follower &&
-                    raft_node_is_voting(follower) &&
-                    point <= raft_node_get_match_idx(follower))
-                {
-                    votes++;
-                }
-            }
-
-            if (raft_get_num_voting_nodes(me_) / 2 < votes)
-                raft_set_commit_idx(me_, point);
-        }
-        if (ety)
-            raft_entry_release(ety);
-    }
-
-    /* Aggressively send remaining entries */
-    if (raft_node_get_next_idx(node) <= raft_get_current_idx(me_))
-        raft_send_appendentries(me_, node);
-
-    /* periodic applies committed entries lazily */
+    if (me->auto_flush)
+        return raft_flush(me_, 0);

     return 0;
 }
@@ -960,6 +944,12 @@ int raft_recv_appendentries(
         r->current_idx = ae->prev_log_idx + 1 + i;
     }

+    if (ae->n_entries > 0) {
+        e = me->log_impl->sync(me->log);
+        if (0 != e)
+            goto out;
+    }
+
     /* 4. If leaderCommit > commitIndex, set commitIndex =
        min(leaderCommit, index of most recent entry) */
     if (raft_get_commit_idx(me_) < ae->leader_commit)
@@ -1120,7 +1110,6 @@ int raft_recv_entry(raft_server_t* me_,
                     msg_entry_response_t *r)
 {
     raft_server_private_t* me = (raft_server_private_t*) me_;
-    int i;

     if (raft_entry_is_voting_cfg_change(ety))
     {
@@ -1148,30 +1137,18 @@ int raft_recv_entry(raft_server_t* me_,
     if (0 != e)
         return e;

-    for (i = 0; i < me->num_nodes; i++)
-    {
-        raft_node_t* node = me->nodes[i];
-
-        if (me->node == node || !node)
-            continue;
-
-        /* Only send new entries.
-         * Don't send the entry to peers who are behind, to prevent them from
-         * becoming congested. */
-        raft_index_t next_idx = raft_node_get_next_idx(node);
-        if (next_idx == raft_get_current_idx(me_))
-            raft_send_appendentries(me_, node);
-    }
-
     /* if we are the only voter, commit now, as no appendentries_response will occur */
     if (raft_is_single_node_voting_cluster(me_)) {
         raft_set_commit_idx(me_, raft_get_current_idx(me_));
     }

     r->id = ety->id;
     r->idx = raft_get_current_idx(me_);
     r->term = me->current_term;

+    if (me->auto_flush) {
+        e = me->log_impl->sync(me->log);
+        if (0 != e)
+            return e;
+
+        return raft_flush(me_, raft_get_current_idx(me_));
+    }
+
     return 0;
 }
@@ -1509,8 +1486,10 @@ int raft_recv_snapshot_response(raft_server_t* me_,
                                raft_node_get_next_idx(node)));
     }

-    /* Send snapshot or appendentries depending on next idx */
-    return raft_send_appendentries(me_, node);
+    if (me->auto_flush)
+        return raft_flush(me_, 0);
+
+    return 0;
 }

 int raft_send_appendentries(raft_server_t* me_, raft_node_t* node)
@@ -1535,6 +1514,12 @@ int raft_send_appendentries(raft_server_t* me_, raft_node_t* node)
     if (!me->cb.send_appendentries)
         return -1;

+    if (me->cb.backpressure) {
+        if (me->cb.backpressure(me_, me->udata, node) != 0) {
+            return 0;
+        }
+    }
+
     msg_appendentries_t ae = {
         .term = me->current_term,
         .leader_id = raft_get_nodeid(me_),
@@ -1591,7 +1576,6 @@ int raft_send_appendentries_all(raft_server_t* me_)
     int i, e;
     int ret = 0;

-    me->timeout_elapsed = 0;
     for (i = 0; i < me->num_nodes; i++)
     {
         if (me->node == me->nodes[i])
@@ -1699,7 +1683,7 @@ int raft_poll_entry(raft_server_t* me_)
     if (e != 0)
         return e;

-    return 0;
+    return me->log_impl->sync(me->log);
 }

 int raft_pop_entry(raft_server_t* me_)
@@ -1708,8 +1692,12 @@ int raft_pop_entry(raft_server_t* me_)
     raft_index_t cur_idx = me->log_impl->current_idx(me->log);

-    return me->log_impl->pop(me->log, cur_idx,
-                             (func_entry_notify_f) raft_handle_remove_cfg_change, me_);
+    int e = me->log_impl->pop(me->log, cur_idx,
+                              (func_entry_notify_f) raft_handle_remove_cfg_change, me_);
+    if (e != 0)
+        return e;
+
+    return me->log_impl->sync(me->log);
 }

 raft_index_t raft_get_first_entry_idx(raft_server_t* me_)
@@ -1794,6 +1782,10 @@ int raft_end_snapshot(raft_server_t *me_)
     if (e != 0)
         return e;

+    e = me->log_impl->sync(me->log);
+    if (e != 0)
+        return e;
+
     me->snapshot_in_progress = 0;

     raft_log(me_,
@@ -1956,7 +1948,7 @@ void raft_entry_release_list(raft_entry_t **ety_list, size_t len)
     }
 }

-void raft_queue_read_request(raft_server_t* me_, func_read_request_callback_f cb, void *cb_arg)
+int raft_queue_read_request(raft_server_t* me_, func_read_request_callback_f cb, void *cb_arg)
 {
     raft_server_private_t* me = (raft_server_private_t*) me_;
@@ -1975,7 +1967,12 @@ void raft_queue_read_request(raft_server_t* me_, func_read_request_callback_f cb
         me->read_queue_tail->next = req;
     me->read_queue_tail = req;

-    raft_send_appendentries_all(me_);
+    me->need_quorum_round = 1;
+
+    if (me->auto_flush)
+        return raft_flush(me_, 0);
+
+    return 0;
 }

 static void pop_read_queue(raft_server_private_t* me, int can_read)
@@ -2121,3 +2118,102 @@ void raft_reset_transfer_leader(raft_server_t* me_, int timed_out)
         me->sent_timeout_now = 0;
     }
 }
+
+static int index_cmp(const void *a, const void *b)
+{
+    raft_index_t va = *((raft_index_t*) a);
+    raft_index_t vb = *((raft_index_t*) b);
+
+    return va > vb ? -1 : 1;
+}
+
+static int raft_update_commit_idx(raft_server_t* me_)
+{
+    raft_server_private_t* me = (raft_server_private_t*) me_;
+
+    raft_index_t indexes[me->num_nodes];
+    int num_voters = 0;
+
+    memset(indexes, 0, sizeof(indexes));
+
+    for (int i = 0; i < me->num_nodes; i++)
+    {
+        if (!raft_node_is_voting(me->nodes[i]))
+            continue;
+
+        indexes[num_voters++] = raft_node_get_match_idx(me->nodes[i]);
+    }
+
+    qsort(indexes, num_voters, sizeof(raft_index_t), index_cmp);
+
+    raft_index_t commit = indexes[num_voters / 2];
+    if (commit > me->commit_idx)
+    {
+        /* Leader can only commit entries from the current term */
+        raft_entry_t* ety = raft_get_entry_from_idx(me_, commit);
+        if (ety->term == me->current_term)
+            raft_set_commit_idx(me_, commit);
+
+        raft_entry_release(ety);
+    }
+
+    return 0;
+}
+
+raft_index_t raft_get_index_to_sync(raft_server_t* me_)
+{
+    raft_server_private_t* me = (raft_server_private_t*) me_;
+
+    raft_index_t idx = raft_get_current_idx(me_);
+
+    if (me->next_sync_index > idx)
+        return 0;
+
+    me->next_sync_index = idx + 1;
+    return idx;
+}
+
+int raft_set_auto_flush(raft_server_t* me_, int flush)
+{
+    raft_server_private_t* me = (raft_server_private_t*) me_;
+
+    me->auto_flush = flush ? 1 : 0;
+    return 0;
+}
+
+int raft_flush(raft_server_t* me_, raft_index_t sync_index)
+{
+    raft_server_private_t* me = (raft_server_private_t*) me_;
+
+    if (!raft_is_leader(me_)) {
+        return 0;
+    }
+
+    if (sync_index > raft_node_get_match_idx(me->node)) {
+        raft_node_set_match_idx(me->node, sync_index);
+    }
+
+    int e = raft_update_commit_idx(me_);
+    if (e != 0) {
+        return e;
+    }
+
+    for (int i = 0; i < me->num_nodes; i++)
+    {
+        if (me->node == me->nodes[i])
+            continue;
+
+        if (!me->need_quorum_round &&
+            raft_node_get_next_idx(me->nodes[i]) > raft_get_current_idx(me_))
+            continue;
+
+        raft_send_appendentries(me_, me->nodes[i]);
+    }
+
+    me->need_quorum_round = 0;
+
+    if (me->last_applied_idx < raft_get_commit_idx(me_)) {
+        e = raft_apply_all(me_);
+        if (e != 0) {
+            return e;
+        }
+    }
+
+    raft_process_read_queue(me_);
+
+    return 0;
+}
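The new raft_update_commit_idx() above derives the commit index as a median: with the voters' match indexes sorted in descending order, the element at position num_voters / 2 is the highest index already replicated on a majority. A small self-contained illustration of that selection (plain example code, not library code):

#include <stdio.h>
#include <stdlib.h>

/* Same comparator shape as index_cmp() above: sort in descending order. */
static int cmp_desc(const void *a, const void *b)
{
    long va = *(const long *) a;
    long vb = *(const long *) b;

    return va > vb ? -1 : 1;
}

int main(void)
{
    /* Example match indexes for 5 voters, leader included. */
    long match[] = { 9, 2, 7, 4, 5 };
    int num_voters = 5;

    qsort(match, num_voters, sizeof(long), cmp_desc);

    /* Sorted: 9 7 5 4 2. match[5 / 2] == 5, and indexes up to 5 are present
     * on 3 of the 5 voters, i.e. on a majority. */
    printf("majority-replicated index: %ld\n", match[num_voters / 2]);

    return 0;
}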
tests/helpers.h

@@ -36,6 +36,7 @@ static void __RAFT_APPEND_ENTRY(void *r, int id, raft_term_t term, const char *d
 {
     raft_entry_t *e = __MAKE_ENTRY(id, term, data);
     raft_append_entry(r, e);
+    raft_node_set_match_idx(raft_get_my_node(r), raft_get_current_idx(r));
 }

 static void __RAFT_APPEND_ENTRIES_SEQ_ID(void *r, int count, int id, raft_term_t term, const char *data)
tests/test_log_impl.c

@@ -9,6 +9,7 @@
 #include "linked_list_queue.h"
 #include "raft.h"
 #include "raft_private.h"
+#include "helpers.h"

 static raft_node_id_t __get_node_id(
tests/test_server.c

@@ -126,6 +126,7 @@ void TestRaft_server_get_my_node(CuTest * tc)
 void TestRaft_server_idx_starts_at_1(CuTest * tc)
 {
     void *r = raft_new();
+    raft_add_node(r, NULL, 1, 1);

     CuAssertTrue(tc, 0 == raft_get_current_idx(r));

     raft_set_current_term(r, 1);
@@ -285,6 +286,7 @@ void TestRaft_server_append_entry_means_entry_gets_current_term(CuTest* tc)
 void TestRaft_server_append_entry_is_retrievable(CuTest * tc)
 {
     void *r = raft_new();
+    raft_add_node(r, NULL, 1, 1);
     raft_set_callbacks(r, &generic_funcs, NULL);
     raft_set_state(r, RAFT_STATE_CANDIDATE);
@@ -343,7 +345,7 @@ void TestRaft_server_entry_is_retrieveable_using_idx(CuTest* tc)
     char *str2 = "bbb";

     void *r = raft_new();
     raft_add_node(r, NULL, 1, 1);
     raft_set_current_term(r, 1);
     __RAFT_APPEND_ENTRY(r, 1, 1, str);
@@ -397,6 +399,8 @@ void TestRaft_server_increment_lastApplied_when_lastApplied_lt_commitidx(
     };

     void *r = raft_new();
+    raft_add_node(r, NULL, 1, 1);
+    raft_node_set_voting(raft_get_my_node(r), 0);
     raft_set_callbacks(r, &funcs, NULL);

     /* must be follower */
@@ -423,6 +427,7 @@ void TestRaft_user_applylog_error_propogates_to_periodic(
     };

     void *r = raft_new();
+    raft_add_node(r, NULL, 1, 1);
     raft_set_callbacks(r, &funcs, NULL);

     /* must be follower */
@@ -447,6 +452,7 @@ void TestRaft_server_apply_entry_increments_last_applied_idx(CuTest* tc)
     };

     void *r = raft_new();
+    raft_add_node(r, NULL, 1, 1);
     raft_set_callbacks(r, &funcs, NULL);
     raft_set_last_applied_idx(r, 0);
     raft_set_current_term(r, 1);
@@ -572,6 +578,7 @@ void TestRaft_server_recv_entry_auto_commits_if_we_are_the_only_node(CuTest * tc
 void TestRaft_server_recv_entry_fails_if_there_is_already_a_voting_change(CuTest * tc)
 {
     void *r = raft_new();
+    raft_set_auto_flush(r, 0);
     raft_add_node(r, NULL, 1, 1);
     raft_set_election_timeout(r, 1000);
     raft_become_leader(r);
@@ -589,7 +596,7 @@ void TestRaft_server_recv_entry_fails_if_there_is_already_a_voting_change(CuTest
     raft_entry_t *ety2 = __MAKE_ENTRY(2, 1, "entry");
     ety2->type = RAFT_LOGTYPE_ADD_NODE;
     CuAssertTrue(tc, RAFT_ERR_ONE_VOTING_CHANGE_ONLY == raft_recv_entry(r, ety2, &cr));
-    CuAssertTrue(tc, 1 == raft_get_commit_idx(r));
+    CuAssertTrue(tc, 0 == raft_get_commit_idx(r));
 }

 void TestRaft_server_cfg_sets_num_nodes(CuTest * tc)
@@ -3227,6 +3234,7 @@ void TestRaft_leader_recv_entry_resets_election_timeout(
     CuTest * tc)
 {
     void *r = raft_new();
+    raft_add_node(r, NULL, 1, 1);
     raft_set_election_timeout(r, 1000);
     raft_set_state(r, RAFT_STATE_LEADER);
@@ -3361,6 +3369,11 @@ void TestRaft_leader_recv_entry_fails_if_prevlogidx_less_than_commit(CuTest * tc
     CuAssertIntEquals(tc, 0, aer.success);
 }

+int backpressure(raft_server_t* raft, void* udata, raft_node_t* node)
+{
+    return 1;
+}
+
 void TestRaft_leader_recv_entry_does_not_send_new_appendentries_to_slow_nodes(CuTest * tc)
 {
     void *r = raft_new();
@@ -3370,6 +3383,7 @@ void TestRaft_leader_recv_entry_does_not_send_new_appendentries_to_slow_nodes(Cu
     raft_cbs_t funcs = {
         .persist_term = __raft_persist_term,
         .send_appendentries = sender_appendentries,
+        .backpressure = backpressure
     };
     void *sender = sender_new(NULL);
tests/virtraft2.py

@@ -22,6 +22,7 @@ Usage:
   virtraft --servers SERVERS [-d RATE] [-D RATE] [-c RATE] [-C RATE] [-m RATE]
            [-P RATE] [-s SEED] [-i ITERS] [-p] [--tsv] [--rqm MULTI]
            [-q] [-v] [-l LEVEL] [-j] [-L LOGFILE] [--duplex_partition]
+           [--auto_flush]
   virtraft --version
   virtraft --help
@@ -49,6 +50,7 @@ Options:
   -V --version          Display version.
   -h --help             Prints a short usage summary.
   --duplex_partition    On partition, prevent traffic from flowing in both directions
+  --auto_flush          Use libraft with auto_flush option

 Examples:
@@ -311,6 +313,9 @@ def verify_read(arg):
             continue

+        node = lib.raft_get_node(net.servers[i - 1].raft, leader.id)
+        if node == ffi.NULL:
+            continue
+
         msg_id = lib.raft_node_get_max_seen_msg_id(node)
         if msg_id >= arg:
             count += 1
@@ -363,6 +368,7 @@ class Network(object):
         self.partitions = set()
         self.no_random_period = False
         self.duplex_partition = False
+        self.auto_flush = False
         self.last_seen_read_queue_msg_id = -1
         self.rqm = 10000000000
@@ -458,6 +464,12 @@ class Network(object):
             else:
                 server.periodic(self.random.randint(1, 100))

+            if not self.auto_flush:
+                # Pretend the async disk write operation has completed. Also,
+                # call raft_flush() often to trigger sending appendentries.
+                idx = lib.raft_get_index_to_sync(server.raft)
+                assert lib.raft_flush(server.raft, idx) == 0
+
         # Deadlock detection
         if self.client_rate != 0 and self.latest_applied_log_idx != 0 and self.latest_applied_log_iteration + 5000 < self.iteration:
             logger.error("deadlock detected iteration:{0} appliedidx:{1}\n".format(
@@ -767,7 +779,6 @@ class Network(object):
         logger.info(f"trying to remove follower: node {lib.raft_get_nodeid(server.raft)}")

-        # Wake up new node
         assert NODE_CONNECTED == server.connection_status
         server.set_connection_status(NODE_DISCONNECTING)

     def diagnostic_info(self):
@@ -838,6 +849,7 @@ class RaftServer(object):
         lib.raft_set_callbacks(self.raft, cbs, self.udata)
         lib.log_set_callbacks(lib.raft_get_log(self.raft), log_cbs, self.raft)
         lib.raft_set_election_timeout(self.raft, 500)
+        lib.raft_set_auto_flush(self.raft, network.auto_flush)

         self.fsm_dict = {}
         self.fsm_log = []
@@ -1353,6 +1365,7 @@ if __name__ == '__main__':
     net.no_random_period = 1 == int(args['--no_random_period'])
     net.duplex_partition = 1 == int(args['--duplex_partition'])
     net.rqm = int(args['--rqm'])
+    net.auto_flush = 1 == int(args['--auto_flush'])
     net.num_of_servers = int(args['--servers'])