# NOTE(review): the tokens "Newer" / "Older" here were GitLab diff-view
# pagination artifacts, not file content — replaced with this comment.
# NOTE(review): a bare top-level `retry:` is not valid GitLab CI config;
# assumed to be the pipeline-wide default retry policy — TODO confirm.
default:
  retry:
    max: 1
    # Retry only on infrastructure failures, never on genuine test failures.
    when:
      - scheduler_failure
      - runner_system_failure
      - stuck_or_timeout_failure
      - api_failure

# NOTE(review): the stray `- front-deploy` item was the remnant of the
# `stages:` list ("front-deploy" is not a valid retry reason). The other
# entries are reconstructed from the `stage:` keys used by jobs below
# (test, pack, stress-test, front-deploy) — TODO confirm the order.
stages:
  - test
  - pack
  - stress-test
  - front-deploy
workflow:
  # See https://docs.gitlab.com/ee/ci/jobs/job_control.html#avoid-duplicate-pipelines
  rules:
    # To avoid duplicate pipelines we disable merge request events,
    # leaving only pushes and manual triggering.
    - if: $CI_PIPELINE_SOURCE == "merge_request_event"
      when: never
    - if: $CI_PIPELINE_SOURCE == "push"
    - if: $CI_PIPELINE_SOURCE == "web"
# NOTE(review): the `variables:` header line was lost in the source; these
# keys are clearly the global pipeline variables (REGISTRY, CARGO_HOME, etc.
# are referenced throughout the jobs below).
variables:
  REGISTRY: docker-public.binary.picodata.io
  BASE_IMAGE: ${REGISTRY}/picodata-build-base
  BASE_IMAGE_LATEST: latest
  # Anchor reused by `compare_to` in the change-detection rules below.
  MAIN_BRANCH: &main-branch master
  CARGO_HOME: $CI_PROJECT_DIR/.cargo
  KANIKO_REGISTRY_MIRROR: docker-proxy.binary.picodata.io
  FF_USE_FASTZIP: "true"
  CACHE_COMPRESSION_LEVEL: "fastest"
  PARENT_BRANCH: $CI_COMMIT_BRANCH
  PARENT_CI_COMMIT_SHA: $CI_COMMIT_SHA
  PARENT_PROJECT_PATH: ${CI_PROJECT_PATH}.git
  # Quoted so the values stay strings rather than YAML integers.
  KUBERNETES_CPU_REQUEST: "6"
  KUBERNETES_MEMORY_REQUEST: "6Gi"
# job:rules explained:
#
# - if build-base changes on master branch (compared to HEAD~1)
#   * build-base-image (with tag latest) and push
#   * test (on base-image:latest)
# - if build-base changes on development branch (compared to master)
#   * build-base-image (with tag sha)
#   * test (on base-image:sha)
# - else (if build-base doesn't change)
#   * skip build-base-image
#   * just test (on base-image:latest)
#
# Anchor syntax explained here:
# https://docs.gitlab.com/ee/ci/yaml/yaml_optimization.html
#
.rules:
  - &if-build-base-changes-on-master-branch
    if: $CI_COMMIT_BRANCH == $MAIN_BRANCH
    changes:
      # implies compare_to HEAD~1
      paths: &build-base-changes-paths
        - docker-build-base/**
        - .gitlab-ci.yml
  - &if-build-base-changes-on-dev-branch
    if: $CI_COMMIT_BRANCH != $MAIN_BRANCH
    # NOTE(review): `compare_to`/`paths` must sit under a `changes:` key
    # (as in the master-branch rule above); the wrapper was lost in the source.
    changes:
      compare_to: *main-branch
      paths: *build-base-changes-paths
  - &else {}
# Cargo registry/build cache keyed on the lockfile.
.base_cache: &base_cache
  paths:
    - .cargo/
  key:
    files:
      - Cargo.lock
.base_node: &base_node
  paths:
    # NOTE(review): the cached path list (and any `key:` section) was lost in
    # the source — `webui/node_modules/` is inferred from the
    # `cd webui && yarn lint` step in the lint job; TODO confirm.
    - webui/node_modules/
# Python virtualenv cache keyed on the Pipfile lock.
.py_cache: &py_cache
  paths:
    - .venv
  key:
    files:
      - Pipfile.lock
# NOTE(review): the job-name line and the `variables:` headers of this stanza
# were lost in the source. The name `build-base-image` is inferred from the
# rules-explanation comment above — TODO confirm against version control.
build-base-image:
  rules:
    - <<: *if-build-base-changes-on-master-branch
      variables:
        BASE_IMAGE_TAG: ${BASE_IMAGE_LATEST}
    - <<: *if-build-base-changes-on-dev-branch
      variables:
        BASE_IMAGE_TAG: ${PARENT_CI_COMMIT_SHA}
  variables:
    DOCKERFILE: docker-build-base/Dockerfile
    DESTINATION: ${BASE_IMAGE}:${BASE_IMAGE_TAG}
    PUSH_DOCKER: ""
  trigger:
    project: picodata/devops/picodata-in-docker
    branch: main
    strategy: depend
# NOTE(review): this stanza's name line was lost; it reads like the shared
# `.test` template that several jobs below reference via `extends: .test` —
# TODO confirm. The before_script body also defines the `&fetch-tags` anchor,
# which is aliased later (`- *fetch-tags`) but was undefined in the source.
.test:
  variables:
    GIT_DEPTH: 100
    GIT_SUBMODULE_STRATEGY: recursive
  before_script:
    # Gitlab CI implicitly clones specific refs (e.g. `refs/pipelines/xxxxxxx`),
    # but it doesn't imply fetching tags. We clone them manually with the
    # `git fetch` command.
    #
    # Tags in `tarantool-sys` and `luajit` submodules are necessary for
    # the build scripts. Without them the job fails.
    - &fetch-tags |
      # Fetch tags
      ci-log-section start "fetch-submodule-tags" Fetching tags for submodules
      for s in . tarantool-sys tarantool-sys/third_party/luajit; do
        echo "Fetching tag for $s"
        pushd $s
        until git describe; do git fetch --deepen 100; done
        popd
      done
      ci-log-section end "fetch-submodule-tags"
.parallel:
  parallel:
    matrix:
      # NOTE(review): the matrix entries were lost in the source (an empty
      # matrix is invalid). BUILD_PROFILE is inferred from its use in the
      # test-linux script — TODO confirm the exact profile values.
      - BUILD_PROFILE: [dev, release]
# Reusable script snippet: install Python dependencies into ./.venv so the
# `.py_cache` cache entry above can pick them up.
.pipenv-install: &pipenv-install |
  # Pipenv install
  ci-log-section start "pipenv-install" Installing pip dependencies ...
  PIPENV_VENV_IN_PROJECT=1 PIP_NO_CACHE_DIR=true python3.10 -m pipenv install --deploy
  ci-log-section end "pipenv-install"
test-linux:
  extends:
    - .test
    - .parallel
  rules:
    - <<: *if-build-base-changes-on-master-branch
      variables:
        BASE_IMAGE_TAG: ${BASE_IMAGE_LATEST}
    - <<: *if-build-base-changes-on-dev-branch
      variables:
        BASE_IMAGE_TAG: ${CI_COMMIT_SHA}
    - <<: *else
      variables:
        BASE_IMAGE_TAG: ${BASE_IMAGE_LATEST}
  image:
    name: ${BASE_IMAGE}:${BASE_IMAGE_TAG}
    pull_policy: if-not-present
  variables:
    GIT_DEPTH: 1
  cache:
    - <<: *py_cache
    - <<: *base_cache
  # NOTE(review): the `script:` key was lost in the source — the items below
  # are clearly a job script, not cache entries; restored here.
  script:
    - |
      # Check rust version consistency
      ci-log-section start "rust-version" Checking rust version consistency ...
      exec 3>&1 # duplicate stdout fd
      grep_rust_version() { echo "$1: $(sed -nr "s/.*rust-version = \"(\S+)\".*/\1/p" $1)"; }
      grep_toolchain() { echo "$1: $(sed -nr "s/.*--default-toolchain (\S+).*/\1/p" $1)"; }
      UNIQUE_VERSIONS=$(
        {
          grep_rust_version Cargo.toml;
          grep_toolchain Makefile;
          grep_toolchain docker-build-base/Dockerfile;
          grep_toolchain helm/picodata.Dockerfile;
        } \
        | tee /dev/fd/3 \
        | cut -d: -f2- | sort | uniq | wc -l
      );
      ci-log-section end "rust-version"
      if [ "$UNIQUE_VERSIONS" != "1" ]; then
        echo "Error: checking rust version consistency failed"
        exit 1
      fi
    # There are no Rust tests for `webui` feature, it's checked in pytest
    - cargo build --timings --locked --profile=$BUILD_PROFILE
    - cargo build -p gostech-audit-log --profile=$BUILD_PROFILE
    - cargo build -p testplug --profile=$BUILD_PROFILE
    - cargo test --locked --profile=$BUILD_PROFILE
    - cargo build --features webui,error_injection --locked --profile=$BUILD_PROFILE
    # -vv shows extended diff for failures
    # NOTE(review): the `- >` folded-scalar marker was lost in the source.
    - >
      BUILD_PROFILE=$BUILD_PROFILE pipenv run pytest -vv
      --numprocesses auto
      --junitxml=junit_pytest.xml
      --with-webui
  artifacts:
    when: always
    paths:
      - junit_pytest.xml
    reports:
      junit: junit_pytest.xml
# NOTE(review): this span was a run of bare line numbers (232-263) left over
# from a collapsed region of a diff view — the original content of these
# lines is missing and must be restored from version control.
lint:
  stage: test
  interruptible: true
  rules:
    - <<: *if-build-base-changes-on-master-branch
      variables:
        BASE_IMAGE_TAG: ${BASE_IMAGE_LATEST}
    - <<: *if-build-base-changes-on-dev-branch
      variables:
        BASE_IMAGE_TAG: ${CI_COMMIT_SHA}
    - <<: *else
      variables:
        BASE_IMAGE_TAG: ${BASE_IMAGE_LATEST}
  image:
    name: ${BASE_IMAGE}:${BASE_IMAGE_TAG}
    pull_policy: if-not-present
  variables:
    GIT_DEPTH: 1
    GIT_SUBMODULE_STRATEGY: recursive
  # Lint only reads the caches — never writes them back.
  cache:
    - <<: *py_cache
      policy: pull
    - <<: *base_cache
      policy: pull
    - <<: *base_node
      policy: pull
  script:
    - *fetch-tags
    - *pipenv-install
    - make lint
    - cd webui && yarn lint
.test-patch-rules: &test-patch-rules
  rules:
    - <<: *if-build-base-changes-on-master-branch
      variables:
        # NOTE(review): the source had the self-referential
        # `BASE_IMAGE_TAG: ${BASE_IMAGE_TAG}` here (and in the next rule);
        # fixed to the latest tag, matching every other master-branch rule.
        BASE_IMAGE_TAG: ${BASE_IMAGE_LATEST}
    # build base didn't change but we still want the job to run
    - if: $CI_COMMIT_BRANCH == $MAIN_BRANCH
      variables:
        BASE_IMAGE_TAG: ${BASE_IMAGE_LATEST}
    - <<: *if-build-base-changes-on-dev-branch
      variables:
        BASE_IMAGE_TAG: ${CI_COMMIT_SHA}
    # build base didn't change but we still want the job to run
    # in case certification_patches has changed
    - if: $CI_COMMIT_BRANCH != $MAIN_BRANCH
      changes:
        # NOTE(review): the path list was lost in the source (empty `changes:`
        # is meaningless); inferred from the comment above — TODO confirm.
        - certification_patches/**/*
      allow_failure: true
      variables:
        BASE_IMAGE_TAG: ${BASE_IMAGE_LATEST}
    # manual in other cases
    - if: $CI_COMMIT_BRANCH != $MAIN_BRANCH
      when: manual
      allow_failure: true
      variables:
        BASE_IMAGE_TAG: ${BASE_IMAGE_LATEST}
test-patch-picodata:
  extends: .test
  <<: *test-patch-rules
  # NOTE(review): the `variables:` wrapper was lost in the source —
  # GIT_SUBMODULE_STRATEGY is not a valid job-level key on its own.
  variables:
    GIT_SUBMODULE_STRATEGY: recursive
  image:
    name: ${BASE_IMAGE}:${BASE_IMAGE_LATEST}
    pull_policy: always
  cache:
    - <<: *py_cache
    - <<: *base_cache
    - <<: *base_node
  script:
    - PIPENV_VENV_IN_PROJECT=1 PIP_NO_CACHE_DIR=true python3.10 -m pipenv install --deploy
    - ./tools/prepare_source_tree_for_stat_analysis.py apply
    - cargo build --locked --release --features dynamic_build
    - cargo build -p testplug --release
    - cargo build -p gostech-audit-log --release
    - cargo test --locked --release --features dynamic_build
    - cargo build --features webui,error_injection --locked --release --features dynamic_build
    - >
      BUILD_PROFILE=release pipenv run pytest -vv
      --color=yes
      --numprocesses auto
      --junitxml=junit_pytest.xml
      --with-webui
test-patch-tarantool:
  extends: .test
  <<: *test-patch-rules
  # NOTE(review): the `variables:` wrapper was lost in the source —
  # GIT_SUBMODULE_STRATEGY is not a valid job-level key on its own.
  variables:
    GIT_SUBMODULE_STRATEGY: recursive
  image:
    name: ${BASE_IMAGE}:${BASE_IMAGE_LATEST}
    pull_policy: always
  script:
    - pushd tarantool-sys/ && make -f .test.mk test-release; popd
# NOTE(review): the job-name line was lost in the source; the name is
# inferred from the `mac-dev-m1` runner tag — TODO confirm against
# version control.
test-mac-m1:
  extends: .test
  tags:
    - mac-dev-m1
  script:
    # Gitlab doesn't know about $HOME. If specified in variables it becomes empty
    - export CARGO_HOME=$HOME/.cargo
    - cargo -V
    - cargo build --locked
    - cargo build --features webui --locked
    # There are no Rust tests for `webui` feature.
    # It will be checked during integration tests.
    - cargo fmt -- -v --check
    - cargo clippy --version
    - cargo clippy --features "load_test webui error_injection" -- --deny clippy::all --no-deps
    # - |
    #   # Pipenv install
    #   ci-log-section start "pipenv-install" Installing pip dependencies ...
    #   PIPENV_VENV_IN_PROJECT=1 PIP_NO_CACHE_DIR=true python3.10 -m pipenv install --deploy
    #   ci-log-section end "pipenv-install"
    # - pipenv run pytest --numprocesses auto -v
    # - pipenv run lint
    # - |
# NOTE(review): the header lines of this stanza were lost; reconstructed as
# the hidden `.test-helm` template referenced by `extends: .test-helm`
# below — TODO confirm. The `changes:` wrapper around `compare_to`/`paths`
# was also missing, as in the `.rules` anchors above.
.test-helm:
  rules:
    - if: $CI_COMMIT_BRANCH != $MAIN_BRANCH
      changes:
        compare_to: *main-branch
        paths:
          - helm/picodata.Dockerfile
          - helm/picodata-diag.Dockerfile
    - if: $CI_COMMIT_BRANCH == $MAIN_BRANCH
      allow_failure: true
test-helm-image:
  extends: .test-helm
  parallel:
    matrix:
      - DOCKERFILE: helm/picodata-diag.Dockerfile
        DESTINATION: ${REGISTRY}/picodata-diag:${PARENT_CI_COMMIT_SHA}
      - DOCKERFILE: helm/picodata.Dockerfile
        DESTINATION: ${REGISTRY}/picodata:${PARENT_CI_COMMIT_SHA}
  trigger:
    project: picodata/devops/picodata-in-docker
    branch: main
    strategy: depend
gamayun-prepare:
  extends: .test
  # NOTE(review): a job-level `when: manual` may not be combined with
  # `rules:` in GitLab CI (config error); moved into each rule instead so
  # the job stays manual in every case.
  rules:
    - <<: *if-build-base-changes-on-master-branch
      when: manual
      variables:
        BASE_IMAGE_TAG: ${BASE_IMAGE_LATEST}
    - <<: *if-build-base-changes-on-dev-branch
      when: manual
      variables:
        BASE_IMAGE_TAG: ${CI_COMMIT_SHA}
    - <<: *else
      when: manual
      variables:
        BASE_IMAGE_TAG: ${BASE_IMAGE_LATEST}
  image:
    name: ${BASE_IMAGE}:${BASE_IMAGE_TAG}
    pull_policy: if-not-present
  script:
    - export CARGO_HOME=$HOME/.cargo
    - cargo -V
    - cargo clippy --locked --message-format=json > clippy.json
  artifacts:
    when: always
    paths:
      - clippy.json
gamayun-run:
  extends: .test
  needs: ["gamayun-prepare"]
  tags:
    - picodata-shell
  script:
    # create an ssh tunnel to gamayun server to allow the report uploading
    # NOTE(review): TUNNEL is only assigned, never executed — presumably a
    # later (lost) step runs `$TUNNEL`; confirm against version control.
    - TUNNEL="ssh -4 -L 9000:localhost:9000 sonar-reports -N -f"
    # svace patches are more important than gamayun and the order of applying
    # is important: first - svace, second - gamayun
    # NOTE(review): the `- ` list marker on the next command was lost in the source.
    - ./tools/prepare_source_tree_for_stat_analysis.py apply
    - |
      # TODO consider moving this to the script as well, this may delete some html
      # license files which is probably not intended
      find . -type d -name 'test*' -o -name 'doc' -o -name 'docs' | xargs -n1 rm -rvf
      find tarantool-sys/third_party/ -type f -name '*.htm*' | xargs -n 1 rm -rfv
      find tarantool-sys/vendor/ -type f -name '*.htm*' | xargs -n 1 rm -rfv
      find . -type d -name stress-test | xargs -n 1 rm -rvf
      find . -type d -name integration-tests | xargs -n 1 rm -rvf
      find . -type d -name 'example*' | xargs -n 1 rm -rvf
      find . -type d -name 'docsrc' | xargs -n 1 rm -rvf
      find . -name '*.md' | xargs -n 1 rm -rvf
      find http tarantool-sys vshard -type d -name .github | xargs -n 1 rm -rfv
    - |
      docker run --rm -t \
        -v $PWD:/tmp/src:rw \
        -e "SONAR_OPS=-Dsonar.python.version=3 -Dsonar.login=${SONAR} -Dsonar.projectKey=Picodata-CI -Dsonar.exclusions=**/*.mod,osv-sonar.json" \
        -e "SONAR_SCANNER_OPTS=-Xmx4096m" \
        -e "CARGO_CLIPPY_FILE=clippy.json" \
        -u $(id -u):$(id -g) --ulimit nofile=100000:100000 --network=host \
        docker.binary.picodata.io/gamayun
build-vm-image:
  stage: test
  when: manual
  # Do not forward this project's variables into the downstream pipeline;
  # pass only BRANCH explicitly.
  inherit:
    variables: false
  variables:
    BRANCH: $CI_COMMIT_BRANCH
  trigger:
    project: picodata/picodata/picodata-fstek-vmbuilder
    branch: main
    strategy: depend
pack-on-tag:
  stage: pack
  rules:
    - if: $CI_PIPELINE_SOURCE == "web"
    - if: $CI_COMMIT_TAG
  # NOTE(review): the `variables:` header was lost in the source.
  variables:
    PROJECT_TARGET: "picodata"
    TYPE: "RELEASE"
    BRANCH_TARGET: $CI_COMMIT_TAG
  # NOTE(review): the source had bare `- PROJECT_TARGET` / `- BRANCH_TARGET`
  # items whose header line was lost — reconstructed as a trigger
  # variables-forward list; the trigger project/branch lines for this job
  # also appear to be missing. TODO restore from version control.
  trigger:
    forward:
      pipeline_variables: true
# NOTE(review): this stanza's header line was lost; reconstructed as the
# hidden `.deploy-docker-tmpl` template that `deploy-docker` below extends
# (a second `rules:`/`trigger:` inside pack-on-tag would be a duplicate-key
# error, so this must be a separate stanza) — TODO confirm.
.deploy-docker-tmpl:
  trigger:
    variables:
      PUSH_DOCKER: ""
  rules:
    - if: $CI_COMMIT_TAG
deploy-docker:
  extends: .deploy-docker-tmpl
  parallel:
    matrix:
      - DOCKERFILE: helm/picodata-diag.Dockerfile
        DESTINATION: ${REGISTRY}/picodata-diag:${CI_COMMIT_TAG}
      - DOCKERFILE: helm/picodata.Dockerfile
        DESTINATION: ${REGISTRY}/picodata:${CI_COMMIT_TAG}
  trigger:
    project: picodata/devops/picodata-in-docker
    branch: main
    strategy: depend
# Stages of independent stress testing in downstream pipeline
# We cannot move artefacts to the downstream pipeline, so we need to build
# and upload them to our repo
.upload-picodata-to-binary:
  image:
    name: ${BASE_IMAGE}:${BASE_IMAGE_LATEST}
  variables:
    VER: $CI_COMMIT_SHORT_SHA
    GIT_DEPTH: 100
    GIT_SUBMODULE_STRATEGY: recursive
  before_script:
    # To conduct stress tests, we require the most recent picodata tag. However,
    # creating a shallow copy with $GIT_DEPTH commits might not include this tag.
    # Fetching all commits takes too much time, so we incrementally download more
    # chunks of commits until we find the required tag.
    - until git describe; do git fetch --deepen 100; done
    - *fetch-tags
  script:
    - cargo build --locked --release --features webui
    - mv target/release/picodata target/release/picodata-$VER
    - curl -v --upload-file target/release/picodata-$VER $RAW_NT_REGISTRY
upload-picodata-to-binary-stress-test:
  stage: stress-test
  extends: .upload-picodata-to-binary
  rules:
    # Automatic on master, manual (and non-blocking) elsewhere.
    - if: $CI_COMMIT_BRANCH == $MAIN_BRANCH
      when: always
    - if: $CI_COMMIT_BRANCH != $MAIN_BRANCH
      when: manual
      allow_failure: true
# NOTE(review): the job-name line was lost in the source; reconstructed as
# the downstream stress-test trigger job — TODO confirm the name against
# version control. `stage: stress-test` is required for the `needs:` on the
# same-stage upload job to be a valid DAG edge.
downstream-stress-test:
  stage: stress-test
  # See https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html
  rules:
    - if: $CI_COMMIT_BRANCH == $MAIN_BRANCH
      when: always
    # NOTE(review): a manual rule without `allow_failure: true` blocks the
    # pipeline — confirm whether blocking is intended here.
    - if: $CI_COMMIT_BRANCH != $MAIN_BRANCH
      when: manual
  trigger:
    project: picodata/devops/proxmox/sbroad-nt
    branch: main
    strategy: depend
  variables:
    VER: $CI_COMMIT_SHORT_SHA
  needs:
    - job: upload-picodata-to-binary-stress-test
upload-picodata-to-binary-front-deploy:
  stage: front-deploy
  extends: .upload-picodata-to-binary
  when: manual
downstream-front-deploy:
  interruptible: true
  # See https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html
  stage: front-deploy
  allow_failure: true
  trigger:
    project: picodata/devops/pico-servers/front
    branch: main
    strategy: depend
  variables:
    VER: $CI_COMMIT_SHORT_SHA
  needs:
    - job: upload-picodata-to-binary-front-deploy