workflow:
  # See https://docs.gitlab.com/ee/ci/jobs/job_control.html#avoid-duplicate-pipelines
  rules:
    # To avoid duplicate pipelines we disable merge request events,
    # leaving only pushes and manual triggering.
    - if: $CI_PIPELINE_SOURCE == "merge_request_event"
      when: never
    - if: $CI_PIPELINE_SOURCE == "push"
    - if: $CI_PIPELINE_SOURCE == "web"
variables:
  REGISTRY: docker-public.binary.picodata.io
  BASE_IMAGE: ${REGISTRY}/picodata-build-base
  BASE_IMAGE_LATEST: latest
  MAIN_BRANCH: &main-branch master
  CARGO_HOME: /data/shared-storage/picodata/.cargo
  CACHE_ARCHIVE: /shared-storage/picodata/cache-v2.tar
  # Helps to tolerate spurious network failures
  GET_SOURCES_ATTEMPTS: 3
  NO_CACHE:
    description: "Do not use cache during build phase"
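  # Because NO_CACHE carries a `description`, it appears as a prefilled field
  # on the "Run pipeline" web form; any non-empty value (e.g. "1") disables the
  # cache, since the restore step below only checks `[[ -n "$NO_CACHE" ]]`.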
# job:rules explained:
#
# - if build-base changes on master branch (compared to HEAD~1)
#   * build-base-image (with tag latest) and push
#   * test (on base-image:latest)
# - if build-base changes on development branch (compared to master)
#   * build-base-image (with tag sha)
#   * test (on base-image:sha)
# - else (if build-base doesn't change)
#   * skip build-base-image
#   * just test (on base-image:latest)
#
# Anchor syntax is explained here:
# https://docs.gitlab.com/ee/ci/yaml/yaml_optimization.html
#
.rules:
  - &if-build-base-changes-on-master-branch
    if: $CI_COMMIT_BRANCH == $MAIN_BRANCH
    changes:
      # implies compare_to: HEAD~1
      paths: &build-base-changes-paths
        - docker-build-base/**
        - .gitlab-ci.yml
  - &if-build-base-changes-on-dev-branch
    if: $CI_COMMIT_BRANCH != $MAIN_BRANCH
    changes:
      compare_to: *main-branch
      paths: *build-base-changes-paths
  - &else {}
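# For reference, after the `<<: *if-build-base-changes-on-master-branch` merge
# in `build-base-image` below, its first rule expands to (illustration only):
#
#   - if: $CI_COMMIT_BRANCH == $MAIN_BRANCH
#     changes:
#       paths:
#         - docker-build-base/**
#         - .gitlab-ci.yml
#     variables:
#       BASE_IMAGE_TAG: ${BASE_IMAGE_LATEST}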
build-base-image:
  rules:
    - <<: *if-build-base-changes-on-master-branch
      variables:
        BASE_IMAGE_TAG: ${BASE_IMAGE_LATEST}
    - <<: *if-build-base-changes-on-dev-branch
      variables:
        BASE_IMAGE_TAG: ${CI_COMMIT_SHA}
  variables:
    GIT_DEPTH: 1
    GIT_STRATEGY: fetch
    GIT_SUBMODULE_STRATEGY: none
  script:
    - docker pull ${BASE_IMAGE}:${BASE_IMAGE_LATEST} || true
    - >-
      docker build
      --cache-from ${BASE_IMAGE}:${BASE_IMAGE_LATEST}
      -t ${BASE_IMAGE}:${BASE_IMAGE_TAG}
      -f ./docker-build-base/Dockerfile
      ./docker-build-base
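    # --cache-from lets this fresh build reuse layers from the previously
    # pulled `latest` image, so Dockerfile steps whose instructions and inputs
    # are unchanged come from cache instead of being rebuilt.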
    - |
      # Push image to registry
      if [ "${CI_COMMIT_BRANCH}" == "${MAIN_BRANCH}" ]; then
        echo "Pushing ${BASE_IMAGE}:${BASE_IMAGE_TAG}"
        mkdir -p $CI_PROJECT_DIR/.docker
        echo $DOCKER_AUTH_RW > $CI_PROJECT_DIR/.docker/config.json
        docker --config $CI_PROJECT_DIR/.docker/ push ${BASE_IMAGE}:${BASE_IMAGE_TAG}
      else
        echo "Skip pushing image on a non-master branch"
      fi
.test:
  variables:
    GIT_DEPTH: 100
    GIT_SUBMODULE_STRATEGY: recursive
  before_script:
    # Gitlab CI implicitly clones specific refs (e.g. `refs/pipelines/xxxxxxx`),
    # but it doesn't imply fetching tags. We fetch them manually with the
    # `git fetch` command.
    #
    # Tags in `tarantool-sys` and `luajit` submodules are necessary for
    # the build scripts. Without them the job fails.
    - &fetch-tags |
      # Fetch tags
      ci-log-section start "fetch-submodule-tags" Fetching tags for submodules
      for s in . tarantool-sys tarantool-sys/third_party/luajit; do
        echo "Fetching tags for $s"
        pushd $s
        until git describe; do git fetch --deepen 100; done
        popd
      done
      ci-log-section end "fetch-submodule-tags"
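    # `git describe` only succeeds once a tag is reachable from HEAD, so the
    # `until` loop above keeps deepening the shallow history 100 commits at a
    # time until the nearest tag comes into view.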
    # Gitlab CI's built-in caching doesn't work well for us, so we implement it manually.
    - |
      # Restore cache
      git fetch origin master:refs/remotes/origin/master
      git branch -a
      if [[ -n "$NO_CACHE" ]]; then
        echo "Skipping restoring from cache because NO_CACHE is set"
      elif [ "$CI_COMMIT_BRANCH" == "$MAIN_BRANCH" ]; then
        echo "Skip restoring cache on the master branch"
      elif git diff origin/"$MAIN_BRANCH" "$CI_COMMIT_SHA" --submodule=short | grep '^[+-]Subproject commit'; then
        echo "Skip restoring cache because submodule(s) changed"
      elif [ -f "${CACHE_ARCHIVE}" ]; then
        ci-log-section start "restore-cache" Restoring cache from ${CACHE_ARCHIVE} ...
        tar -xf ${CACHE_ARCHIVE}
        echo "Ok"
        du -sh ${CACHE_PATHS} || true
        ci-log-section end "restore-cache"
      else
        echo "No cache found"
      fi
  after_script:
    - |
      # Save cache
      if [ "$CI_COMMIT_BRANCH" == "$MAIN_BRANCH" ]; then
        ci-log-section start "save-cache" Saving cache to ${CACHE_ARCHIVE} ...
        du -sh ${CACHE_PATHS} || true
        TMPEXT=$RANDOM
        tar -cf "${CACHE_ARCHIVE}.${TMPEXT}" ${CACHE_PATHS}
        mv -f "${CACHE_ARCHIVE}.${TMPEXT}" "${CACHE_ARCHIVE}"
        echo Ok
        du -sh ${CACHE_ARCHIVE}
        ci-log-section end "save-cache"
      else
        echo "Skip saving cache on a non-master branch"
      fi
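    # Packing into "${CACHE_ARCHIVE}.$RANDOM" and then renaming with `mv -f`
    # keeps the replacement effectively atomic: a concurrent job restoring the
    # cache never reads a half-written archive.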
test-linux:
  extends: .test
  tags:
    - picodata-docker
  rules:
    - <<: *if-build-base-changes-on-master-branch
      variables:
        BASE_IMAGE_TAG: ${BASE_IMAGE_LATEST}
    - <<: *if-build-base-changes-on-dev-branch
      variables:
        BASE_IMAGE_TAG: ${CI_COMMIT_SHA}
    - <<: *else
      variables:
        BASE_IMAGE_TAG: ${BASE_IMAGE_LATEST}
  image:
    name: ${BASE_IMAGE}:${BASE_IMAGE_TAG}
    pull_policy: if-not-present
  script:
    - |
      # Check rust version consistency
      ci-log-section start "rust-version" Checking rust version consistency ...
      exec 3>&1 # duplicate stdout fd
      grep_rust_version() { echo "$1: $(sed -nr "s/.*rust-version = \"(\S+)\".*/\1/p" $1)"; }
      grep_toolchain() { echo "$1: $(sed -nr "s/.*--default-toolchain (\S+).*/\1/p" $1)"; }
      UNIQUE_VERSIONS=$(
        {
          grep_rust_version Cargo.toml;
          grep_toolchain Makefile;
          grep_toolchain docker-build-base/Dockerfile;
          grep_toolchain helm/picodata.Dockerfile;
        } \
        | tee /dev/fd/3 \
        | cut -d: -f2- | sort | uniq | wc -l
      );
      ci-log-section end "rust-version"
      if [ "$UNIQUE_VERSIONS" != "1" ]; then
        echo "Error: checking rust version consistency failed"
        exit 1
      fi
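    # With a (purely illustrative) toolchain 1.76.0 the section prints:
    #   Cargo.toml: 1.76.0
    #   Makefile: 1.76.0
    #   docker-build-base/Dockerfile: 1.76.0
    #   helm/picodata.Dockerfile: 1.76.0
    # UNIQUE_VERSIONS is then 1; any mismatch makes it >1 and fails the job.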
    - |
      # Pipenv install
      ci-log-section start "pipenv-install" Installing pip dependencies ...
      PIPENV_VENV_IN_PROJECT=1 PIP_NO_CACHE_DIR=true python3.10 -m pipenv install --deploy
      ci-log-section end "pipenv-install"
    # There are no Rust tests for the `webui` feature; it's checked in pytest.
    - cargo build -p gostech-audit-log
    - cargo build -p gostech-metrics
    - cargo build --features webui,error_injection --locked
    # -vv shows extended diff for failures
    - >
      pipenv run pytest -vv
      --numprocesses auto
      --junitxml=junit_pytest.xml
      --with-webui
  artifacts:
    when: always
    paths:
      - junit_pytest.xml
    reports:
      junit: junit_pytest.xml
test-mac-m1:
  extends: .test
  tags:
    - mac-dev-m1
  script:
    - export CARGO_HOME=$HOME/.cargo
    - cargo -V
    - cargo build --locked
    - cargo build --features webui --locked
    # There are no Rust tests for the `webui` feature.
    # It will be checked during integration tests.
    - cargo fmt -- -v --check
    - cargo clippy --version
    - cargo clippy --features "load_test webui error_injection" -- --deny clippy::all --no-deps
    # - |
    #   # Pipenv install
    #   ci-log-section start "pipenv-install" Installing pip dependencies ...
    #   PIPENV_VENV_IN_PROJECT=1 PIP_NO_CACHE_DIR=true python3.10 -m pipenv install --deploy
    #   ci-log-section end "pipenv-install"
    # - pipenv run pytest --numprocesses auto -v
    # - pipenv run lint
test-docker:
  stage: test
  tags:
    - shell
  rules:
    - if: $CI_COMMIT_BRANCH != $MAIN_BRANCH
      changes:
        compare_to: *main-branch
        paths:
          - helm/picodata.Dockerfile
          - helm/picodata-diag.Dockerfile
    - if: $CI_COMMIT_BRANCH == $MAIN_BRANCH
      allow_failure: true
  variables:
    GIT_DEPTH: 100
    GIT_STRATEGY: fetch
    GIT_SUBMODULE_STRATEGY: recursive
  before_script:
    - export PATH=docker-build-base:$PATH
    - *fetch-tags
  script:
    - |
      # Build docker images
      for image in picodata picodata-diag; do
        ci-log-section start "test-docker-${image}" Building docker image ${image}
        docker build \
          --label GIT_COMMIT=${CI_COMMIT_SHA} \
          -f helm/${image}.Dockerfile .
        ci-log-section end "test-docker-${image}"
      done
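    # Note that no -t flag is passed: the image is built untagged, because this
    # job only verifies that the helm Dockerfiles still build. Tagged images
    # are built and pushed by the deploy job further below.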
gamayun-prepare:
  extends: .test
  when: manual
  rules:
    - <<: *if-build-base-changes-on-master-branch
      variables:
        BASE_IMAGE_TAG: ${BASE_IMAGE_LATEST}
    - <<: *if-build-base-changes-on-dev-branch
      variables:
        BASE_IMAGE_TAG: ${CI_COMMIT_SHA}
    - <<: *else
      variables:
        BASE_IMAGE_TAG: ${BASE_IMAGE_LATEST}
  image:
    name: ${BASE_IMAGE}:${BASE_IMAGE_TAG}
    pull_policy: if-not-present
  tags:
    - picodata-docker
  script:
    - export CARGO_HOME=$HOME/.cargo
    - cargo -V
    - cargo clippy --locked --message-format=json > clippy.json
    - |
      # Save cache
      CACHE_ARCHIVE="${CACHE_ARCHIVE}_clippy"
      ci-log-section start "save-cache" Saving cache to ${CACHE_ARCHIVE} ...
      du -sh ${CACHE_PATHS} || true
      TMPEXT=$RANDOM
      tar -cf "${CACHE_ARCHIVE}.${TMPEXT}" clippy.json
      mv -f "${CACHE_ARCHIVE}.${TMPEXT}" "${CACHE_ARCHIVE}"
      echo Ok
      du -sh ${CACHE_ARCHIVE}
      ci-log-section end "save-cache"
gamayun-run:
  extends: .test
  needs: ["gamayun-prepare"]
  tags:
    - picodata-shell
  script:
    - CACHE_ARCHIVE="${CACHE_ARCHIVE}_clippy"
    - ls -la ${CACHE_ARCHIVE} || true
    - |
      # Restore cache
      if [ -f "${CACHE_ARCHIVE}" ]; then
        ci-log-section start "restore-cache" Restoring cache from ${CACHE_ARCHIVE} ...
        tar -xf ${CACHE_ARCHIVE}
        echo "Ok"
        ci-log-section end "restore-cache"
      else
        echo "No cache found"
      fi
    # Create an ssh tunnel to the gamayun server to allow the report uploading.
    - TUNNEL="ssh -4 -L 9000:localhost:9000 sonar-reports -N -f"
    # The svace patches are more important than the gamayun ones, and the order
    # of applying matters: first svace, then gamayun.
    - ./tools/prepare_source_tree_for_stat_analysis.py svace
    - ./tools/prepare_source_tree_for_stat_analysis.py gamayun
    - |
      # TODO: consider moving this into the script as well; it may delete some
      # html license files, which is probably not intended.
      # Group the -name tests so that -type d applies to all of them.
      find . -type d \( -name 'test*' -o -name 'doc' -o -name 'docs' \) | xargs -n 1 rm -rvf
      find tarantool-sys/third_party/ -type f -name '*.htm*' | xargs -n 1 rm -rfv
      find tarantool-sys/vendor/ -type f -name '*.htm*' | xargs -n 1 rm -rfv
      find . -type d -name stress-test | xargs -n 1 rm -rvf
      find . -type d -name integration-tests | xargs -n 1 rm -rvf
      find . -type d -name 'example*' | xargs -n 1 rm -rvf
      find . -type d -name 'docsrc' | xargs -n 1 rm -rvf
      find . -name '*.md' | xargs -n 1 rm -rvf
      find http tarantool-sys vshard -type d -name .github | xargs -n 1 rm -rfv
    - |
      docker run --rm -t \
        -v $PWD:/tmp/src:rw \
        -e "SONAR_OPS=-Dsonar.python.version=3 -Dsonar.login=${SONAR} -Dsonar.projectKey=Picodata-CI -Dsonar.exclusions=**/*.mod,osv-sonar.json" \
        -e "SONAR_SCANNER_OPTS=-Xmx4096m" \
        -e "CARGO_CLIPPY_FILE=clippy.json" \
        -u $(id -u):$(id -g) --ulimit nofile=100000:100000 --network=host \
        docker.binary.picodata.io/gamayun
build-vm-image:
  stage: test
  when: manual
  inherit:
    variables: false
  variables:
    BRANCH: $CI_COMMIT_BRANCH
  trigger:
    project: picodata/picodata/picodata-fstek-vmbuilder
    branch: main
    strategy: depend
pack-on-tag:
  stage: pack
  rules:
    - if: $CI_PIPELINE_SOURCE == "web"
    - if: $CI_COMMIT_TAG
  variables:
    PROJECT_TARGET: "picodata"
    TYPE: "RELEASE"
    BRANCH_TARGET: $CI_COMMIT_TAG
  inherit:
    variables:
      - PROJECT_TARGET
      - BRANCH_TARGET
  trigger:

deploy-docker:
  tags:
    - shell
  only:
    - web
    - tags
  variables:
    GIT_DEPTH: 100
    GIT_STRATEGY: fetch
    GIT_SUBMODULE_STRATEGY: recursive
  before_script:
    - export PATH=docker-build-base:$PATH
    - *fetch-tags
    - mkdir -p $CI_PROJECT_DIR/.docker
    - echo $DOCKER_AUTH_RW > $CI_PROJECT_DIR/.docker/config.json
  script:
    - |
      # Rebuild and push docker images
      for image in picodata picodata-diag; do
        ci-log-section start "deploy-docker-${image}" Building and pushing docker image ${image}
        docker build \
          --label GIT_COMMIT=${CI_COMMIT_SHA} \
          -t ${REGISTRY}/${image}:${BASE_IMAGE_LATEST} \
          -f helm/${image}.Dockerfile .
        docker --config $CI_PROJECT_DIR/.docker push ${REGISTRY}/${image}:${BASE_IMAGE_LATEST}
        ci-log-section end "deploy-docker-${image}"
      done
# Stages of independent stress testing in the downstream pipeline.
# We cannot pass artifacts to the downstream pipeline, so we build and upload
# them to our repository instead.
upload-picodata-to-binary:
  stage: stress-test
  tags:
    - picodata-docker
  image:
    name: ${BASE_IMAGE}:${BASE_IMAGE_LATEST}
  variables:
    VER: $CI_COMMIT_SHORT_SHA
    GIT_DEPTH: 100
    GIT_SUBMODULE_STRATEGY: recursive
  script:
    - cargo build --locked --release --features webui
    - mv target/release/picodata target/release/picodata-$VER
    - curl -v --upload-file target/release/picodata-$VER $RAW_NT_REGISTRY
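  # `curl --upload-file` issues an HTTP PUT of the binary to $RAW_NT_REGISTRY
  # under the picodata-$VER name, presumably so the downstream stress-test
  # pipeline (which receives the same VER below) can fetch it by that name.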
downstream-stress-test:
  # See https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html
  stage: stress-test
  trigger:
    project: picodata/devops/proxmox/sbroad-nt
    branch: main
    strategy: depend
  variables:
    VER: $CI_COMMIT_SHORT_SHA
  needs:
    - job: upload-picodata-to-binary
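  # `strategy: depend` makes this trigger job wait for the downstream pipeline
  # and mirror its status, so a failed stress test also fails this pipeline.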