core / picodata · Commit 31090697

Authored 1 year ago by Georgy Moshkin
test: make test_large_snapshot less flaky
Parent: 98b885fd

Appears in 4 merge requests:
- !807 Draft: fix: remove deprecated message for install nodejs
- !794 Gmoshkin/reachability manager yield safety
- !778 sbroad: refactoring of the table constructor API
- !757 fix: more patches for gamayun
Pipeline #29697 passed (stage: test), 1 year ago
Showing 1 changed file: test/int/test_snapshot.py (+30 additions, −27 deletions)
@@ -66,7 +66,7 @@ def assert_eq(lhs, rhs):
 def test_large_snapshot(cluster: Cluster):
-    i1, i2, i3 = cluster.deploy(instance_count=3)
+    i1, i2, i3, i4 = cluster.deploy(instance_count=4)

     # TODO: rewrite using clusterwide settings when we implement those
     script_path = f"{cluster.data_dir}/postjoin.lua"
@@ -80,7 +80,7 @@ def test_large_snapshot(cluster: Cluster):
     param = dict(
         # Number of tuples to insert
         # N=4 * 1024 * 1024,
-        N=4 * 1024 * 1024 / 8,
+        N=4 * 1024 * 1024 / 32,
         # Average size of each tuple (approximate, doesn't include key and meta)
         T=512,
         # Max deviation from the average tuple size
@@ -171,6 +171,7 @@ def test_large_snapshot(cluster: Cluster):
         Retriable(timeout, 5).call(inner)

+    cluster.cas("replace", "_pico_property", ["snapshot_chunk_max_size", 1024 * 1024])
     cluster.create_table(
         dict(
             name="BIG",
@@ -184,13 +185,13 @@ def test_large_snapshot(cluster: Cluster):
     )

     # This one will be receiving a snapshot
-    i3.terminate()
+    i4.terminate()

-    for i in [i1, i2]:
+    for i in [i1, i2, i3]:
         def_prepare_samples(i)
         start_preparing_data(i)

-    for i in [i1, i2]:
+    for i in [i1, i2, i3]:
         wait_data_prepared(i)

     index = cluster.cas("insert", "_pico_property", ["big", "data"])
@@ -198,59 +199,61 @@ def test_large_snapshot(cluster: Cluster):
     i1.raft_compact_log()
     i2.raft_compact_log()
+    i3.raft_compact_log()

-    # First i1 is leader and i3 starts reading snapshot from it.
+    # First i1 is leader and i4 starts reading snapshot from it.
     i1.promote_or_fail()

-    t_i3 = time.time()
+    t_i4 = time.time()
     # Restart the instance triggering the chunky snapshot application.
-    i3.env["PICODATA_SCRIPT"] = script_path
-    i3.start()
+    i4.env["PICODATA_SCRIPT"] = script_path
+    i4.start()

-    # Wait for i3 to start receiving the snapshot
+    # Wait for i4 to start receiving the snapshot
     Retriable(10, 60).call(
-        lambda: assert_eq(i3._raft_status().main_loop_status, "receiving snapshot")
+        lambda: assert_eq(i4._raft_status().main_loop_status, "receiving snapshot")
     )

     # In the middle of snapshot application propose a new entry
     index = cluster.cas("insert", "_pico_property", ["pokemon", "snap"])
-    for i in [i1, i2]:
+    for i in [i1, i2, i3]:
         i.raft_wait_index(index)

     # Add a new instance so that it starts reading the same snapshot
-    t_i4 = time.time()
-    i4 = cluster.add_instance(wait_online=False)
-    i4.env["PICODATA_SCRIPT"] = script_path
-    i4.start()
+    t_i5 = time.time()
+    i5 = cluster.add_instance(wait_online=False)
+    i5.env["PICODATA_SCRIPT"] = script_path
+    i5.start()

-    # Wait for i4 to start receiving the snapshot
+    # Wait for i5 to start receiving the snapshot
     Retriable(10, 60).call(
-        lambda: assert_eq(i3._raft_status().main_loop_status, "receiving snapshot")
+        lambda: assert_eq(i5._raft_status().main_loop_status, "receiving snapshot")
     )

     i1.raft_compact_log()
     i2.raft_compact_log()
+    i3.raft_compact_log()

-    # At some point i2 becomes leader but i3 keeps reading snapshot from i1.
+    # At some point i2 becomes leader but i4 keeps reading snapshot from i1.
     i2.promote_or_fail()

-    i3.wait_online(timeout=30 - (time.time() - t_i3))
-    print(f"i3 catching up by snapshot took: {time.time() - t_i3}s")
+    i4.wait_online(timeout=30 - (time.time() - t_i4))
+    print(f"i4 catching up by snapshot took: {time.time() - t_i4}s")

-    i4.wait_online(timeout=20 - (time.time() - t_i4))
-    print(f"i4 booting up by snapshot took: {time.time() - t_i4}s")
+    i5.wait_online(timeout=20 - (time.time() - t_i5))
+    print(f"i5 booting up by snapshot took: {time.time() - t_i5}s")

     #
     # Check snapshot was applied correctly.
     #
-    assert i3.call("box.space._pico_property:get", "big") == ["big", "data"]
+    assert i4.call("box.space._pico_property:get", "big") == ["big", "data"]
     expected_count = i1.call("box.space._pico_property:count")
     assert isinstance(expected_count, int)
-    assert i3.call("box.space._pico_property:count") == expected_count
+    assert i4.call("box.space._pico_property:count") == expected_count

-    def_prepare_samples(i3)
-    i3.eval(
+    def_prepare_samples(i4)
+    i4.eval(
         """\
         local log = require 'log'
         local math = require 'math'
...

(The rest of the diff is collapsed in this view.)
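A note on the retry pattern: every timing-sensitive assertion in this test goes through the harness's Retriable wrapper, e.g. Retriable(10, 60).call(lambda: assert_eq(...)), so a racy check is polled until a deadline instead of failing on the first look. The diff only shows the call shape; here is a minimal sketch of what such a helper might look like, assuming the arguments mean (timeout seconds, number of attempts) — the real implementation lives in the project's test harness:

    import time

    class Retriable:
        """Hypothetical sketch of a retry-until-deadline helper."""

        def __init__(self, timeout: float, attempts: int):
            self.timeout = timeout    # overall deadline, in seconds
            self.attempts = attempts  # retries budgeted within the deadline

        def call(self, fn, *args, **kwargs):
            deadline = time.monotonic() + self.timeout
            delay = self.timeout / max(self.attempts, 1)
            while True:
                try:
                    return fn(*args, **kwargs)
                except AssertionError:
                    if time.monotonic() > deadline:
                        raise  # out of time: surface the last failure
                    time.sleep(delay)

Under that reading, Retriable(10, 60).call(...) re-checks the raft status roughly every 170 ms for up to 10 seconds, which is what turns the "receiving snapshot" assertions into stable ones.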