default:
  tags:
    - docker-picodata
  retry:
    max: 1
    when:
      - scheduler_failure
      - runner_system_failure
      - stuck_or_timeout_failure
      - api_failure

stages:
  - build-base-image
  - test
  - pack
  - docker
  - build-stress-image
  - stress-test
  - deploy

workflow:
  # See https://docs.gitlab.com/ee/ci/jobs/job_control.html#avoid-duplicate-pipelines
  rules:
    # To avoid duplicate pipelines we disable merge request events,
    # leaving only pushes, manual (web) runs, and schedules.
    - if: $CI_PIPELINE_SOURCE == "merge_request_event"
      when: never
    - if: $CI_PIPELINE_SOURCE == "push"
    - if: $CI_PIPELINE_SOURCE == "web"
    - if: $CI_PIPELINE_SOURCE == "schedule"

variables:
  REGISTRY: docker-public.binary.picodata.io
  BASE_IMAGE: ${REGISTRY}/picodata-build-base
  BASE_IMAGE_LATEST: latest
  MAIN_BRANCH: &main-branch master
  CARGO_HOME: $CI_PROJECT_DIR/.cargo
  KANIKO_REGISTRY_MIRROR: docker-proxy.binary.picodata.io
  FF_USE_FASTZIP: "true"
  CACHE_COMPRESSION_LEVEL: "fastest"
  GIT_DEPTH: 1
  GET_SOURCES_ATTEMPTS: 3
  PICODATA_DESCRIBE: "24.7.0"
  PARENT_BRANCH: $CI_COMMIT_REF_NAME
  PARENT_CI_COMMIT_SHA: $CI_COMMIT_SHA
  PARENT_PROJECT_PATH: ${CI_PROJECT_PATH}.git
  BASE_IMAGE_NAME: docker-public.binary.picodata.io/sbroad-builder
  BASE_IMAGE_TAG: 0.12.0
  TARANTOOL_VERSION: 2.11.2.159
  FF_NETWORK_PER_BUILD: 1
  RAW_REGISTRY: $RAW_PRIVATE_REGISTRY
  CI_DEBUG_SERVICES: "true"
  # k8s runner config
  KUBERNETES_CPU_REQUEST: 2
  KUBERNETES_MEMORY_REQUEST: "4Gi"


# job:rules explained:
#
# - if build-base changes on master branch (compared to HEAD~1)
#     * build-base-image (with tag latest) and push
#     * test (on base-image:latest)
# - if build-base changes on development branch (compared to master)
#     * build-base-image (with tag sha)
#     * test (on base-image:sha)
# - else (if build-base doesn't change)
#     * skip build-base-image
#     * just test (on base-image:latest)
#
# Anchor syntax explained here:
# https://docs.gitlab.com/ee/ci/yaml/yaml_optimization.html
#
.rules:
  - &if-build-base-changes-on-master-branch
    if: ($CI_COMMIT_BRANCH == $MAIN_BRANCH) && ($CI_PIPELINE_SOURCE != "schedule")
    changes:
      paths: &build-base-changes-paths
        - docker-build-base/**
        - .gitlab-ci.yml

  - &if-build-base-changes-on-dev-branch
    if: ($CI_COMMIT_BRANCH != $MAIN_BRANCH) && ($CI_PIPELINE_SOURCE != "schedule")
    changes:
      paths:
        - docker-build-base/**
      compare_to: *main-branch

  - &else {}
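
# For illustration: merging one of these anchors into a job's rules via
# `<<:` inlines the anchored mapping. A sketch (not actual config) of what
# `<<: *if-build-base-changes-on-master-branch` expands to in a job:
#
#   rules:
#     - if: ($CI_COMMIT_BRANCH == $MAIN_BRANCH) && ($CI_PIPELINE_SOURCE != "schedule")
#       changes:
#         paths:
#           - docker-build-base/**
#           - .gitlab-ci.yml
#       variables:
#         BASE_IMAGE_TAG: ${BASE_IMAGE_LATEST}   # plus any job-specific keys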

.base_cache: &base_cache
  paths:
    - .cargo/
    - target/$TARGET/
  key:
    files:
      - Cargo.lock
    prefix: "base_cache_$TARGET"

.base_node: &base_node
  paths:
    - webui/node_modules/
  key:
    files:
      - webui/yarn.lock
    prefix: "base_node_"

.py_cache: &py_cache
  paths:
    - .venv
  key:
    files:
      - poetry.lock
    prefix: "py_cache_"

build-base-image:
  interruptible: true
  stage: build-base-image
  rules:
    - <<: *if-build-base-changes-on-master-branch
      variables:
        BASE_IMAGE_TAG: ${BASE_IMAGE_LATEST}
    - <<: *if-build-base-changes-on-dev-branch
      variables:
        BASE_IMAGE_TAG: ${PARENT_CI_COMMIT_SHA}
    - if: $CI_COMMIT_TAG
      variables:
        BASE_IMAGE_TAG: ${BASE_IMAGE_LATEST}
    - <<: *else
      when: never
  variables:
    DOCKERFILE: docker-build-base/Dockerfile
    DESTINATION: ${BASE_IMAGE}:${BASE_IMAGE_TAG}
    PUSH_DOCKER: ""
  trigger:
    project: picodata/devops/picodata-in-docker
    branch: main
    strategy: depend

.test:
  interruptible: true
  stage: test
  variables:
    GIT_SUBMODULE_STRATEGY: recursive
    GIT_STRATEGY: fetch
    GIT_DESCRIBE: $PICODATA_DESCRIBE
    SUBMODULES_TO_FETCH_TAGS: "tarantool-sys,tarantool-sys/third_party/luajit"
    RUST_BACKTRACE: full
    KUBERNETES_CPU_REQUEST: 6
    KUBERNETES_MEMORY_REQUEST: "6Gi"
  before_script:
    # Gitlab CI implicitly clones specific refs (e.g. `refs/pipelines/xxxxxxx`),
    # but it doesn't fetch tags. We fetch them manually with the
    # `./tools/get_tags.py` script below.
    #
    # Tags in the `tarantool-sys` and `luajit` submodules are necessary for
    # the build scripts. Without them the job fails.
    - &fetch-tags |
      ci-log-section start "fetch-submodule-tags" Fetching tags for submodules
      ./tools/get_tags.py
      ci-log-section end "fetch-submodule-tags"

  # By default "fast-release" profile is used, because of its faster
  # compilation time compared to the "release" profile, which includes
  # additional optimizations.
  # The actual "release" profile is only used when running on master branch.
  rules:
    - if: $CI_COMMIT_BRANCH == $MAIN_BRANCH
      variables:
        RELEASE_PROFILE: &release-profile "release"
        RELEASE_TARGET: &release-target "release"
    - if: $CI_COMMIT_BRANCH != $MAIN_BRANCH
      variables:
        RELEASE_PROFILE: &release-profile "fast-release"
        RELEASE_TARGET: &release-target "fast-release"

.parallel:
  parallel:
    matrix:
      - BUILD_PROFILE: *release-profile
        TARGET: *release-target
      - BUILD_PROFILE: dev
        TARGET: debug

.poetry-install: &poetry-install |
  # Poetry install
  ci-log-section start "poetry-install" Installing python dependencies ...
  POETRY_VIRTUALENVS_IN_PROJECT=1 PIP_NO_CACHE_DIR=true poetry install --no-interaction --ansi
  ci-log-section end "poetry-install"

test-linux:
  extends:
    - .test
    - .parallel
  rules:
    - <<: *if-build-base-changes-on-master-branch
      variables:
        BASE_IMAGE_TAG: ${BASE_IMAGE_LATEST}
        CACHE_POLICY: pull-push
    - if: $CI_COMMIT_BRANCH == $MAIN_BRANCH
      variables:
        BASE_IMAGE_TAG: ${BASE_IMAGE_LATEST}
        CACHE_POLICY: pull-push
    - <<: *if-build-base-changes-on-dev-branch
      variables:
        BASE_IMAGE_TAG: ${CI_COMMIT_SHA}
        CACHE_POLICY: pull
    - <<: *else
      variables:
        BASE_IMAGE_TAG: ${BASE_IMAGE_LATEST}
        CACHE_POLICY: pull
  image:
    name: ${BASE_IMAGE}:${BASE_IMAGE_TAG}
    pull_policy: if-not-present
  cache:
    - <<: *py_cache
      policy: $CACHE_POLICY
    - <<: *base_cache
      policy: $CACHE_POLICY
    - <<: *base_node
      policy: $CACHE_POLICY
  script:
    - |
      # Check rust version consistency
      ci-log-section start "rust-version" Checking rust version consistency ...
      exec 3>&1 # duplicate stdout fd
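      # fd 3 lets `tee` below echo each matched line to the job log while the
      # rest of the pipeline only counts the distinct versions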
      grep_rust_version() { echo "$1: $(sed -nr "s/.*rust-version = \"(\S+)\".*/\1/p" $1)"; }
      grep_toolchain() { echo "$1: $(sed -nr "s/.*--default-toolchain (\S+).*/\1/p" $1)"; }
      UNIQUE_VERSIONS=$(
        {
          grep_rust_version Cargo.toml;
          grep_toolchain Makefile;
          grep_toolchain docker-build-base/Dockerfile;
        } \
        | tee /dev/fd/3 \
        | cut -d: -f2- | sort | uniq | wc -l
      );
      ci-log-section end "rust-version"
      if [ "$UNIQUE_VERSIONS" != "1" ]; then
        echo "Error: checking rust version consistency failed"
        exit 1
      fi

    - cargo -V

    - *poetry-install

    - make build-$BUILD_PROFILE CARGO_FLAGS_EXTRA="--timings"
    - |
      make test \
        PYTEST_FLAGS="--junitxml=junit_pytest.xml --with-webui -n$KUBERNETES_CPU_REQUEST" \
        CARGO_FLAGS_EXTRA="--profile=$BUILD_PROFILE"

  artifacts:
    when: always
    paths:
      - junit_pytest.xml
      - ./target/cargo-timings/cargo-timing.html
      - core*
      - target/$TARGET/picodata
    reports:
      junit: junit_pytest.xml

lint:
  stage: test
  interruptible: true
  rules:
    - <<: *if-build-base-changes-on-master-branch
      variables:
        BASE_IMAGE_TAG: ${BASE_IMAGE_LATEST}
    - <<: *if-build-base-changes-on-dev-branch
      variables:
        BASE_IMAGE_TAG: ${CI_COMMIT_SHA}
    - <<: *else
      variables:
        BASE_IMAGE_TAG: ${BASE_IMAGE_LATEST}
  image:
    name: ${BASE_IMAGE}:${BASE_IMAGE_TAG}
    pull_policy: if-not-present
  variables:
    GIT_SUBMODULE_STRATEGY: recursive
    GIT_DESCRIBE: $PICODATA_DESCRIBE
    SUBMODULES_TO_FETCH_TAGS: "tarantool-sys,tarantool-sys/third_party/luajit"
    TARGET: debug
  cache:
    - <<: *py_cache
      policy: pull
    - <<: *base_cache
      policy: pull
    - <<: *base_node
      policy: pull
  script:
    - *fetch-tags
    - *poetry-install
    - make lint
    - cd webui && yarn lint

.test-patch-rules: &test-patch-rules
  rules:
    - if: ($CI_COMMIT_BRANCH == $MAIN_BRANCH) && ($CI_PIPELINE_SOURCE == "schedule")
    - when: manual
      allow_failure: true
      variables:
        BASE_IMAGE_TAG: ${BASE_IMAGE_LATEST}

test-patch-picodata:
  extends: .test
  <<: *test-patch-rules
  variables:
    GIT_SUBMODULE_STRATEGY: recursive
    BUILD_PROFILE: release
    KUBERNETES_CPU_REQUEST: 8
    KUBERNETES_MEMORY_REQUEST: "8Gi"
  image:
    name: ${BASE_IMAGE}:${BASE_IMAGE_LATEST}
    pull_policy: always
  cache:
    - <<: *py_cache
      policy: pull
    - <<: *base_cache
      policy: pull
    - <<: *base_node
      policy: pull
  script:
    - *poetry-install
    - ./tools/prepare_source_tree_for_stat_analysis.py apply
    - make build-$BUILD_PROFILE CARGO_FLAGS_EXTRA="--features webui,dynamic_build"
    - |
      make test \
        PYTEST_FLAGS="--junitxml=junit_pytest.xml --with-webui" \
        CARGO_FLAGS_EXTRA="--profile=$BUILD_PROFILE --features dynamic_build"

test-patch-tarantool:
  extends: .test
  <<: *test-patch-rules
  variables:
    GIT_SUBMODULE_STRATEGY: recursive
    VARDIR: tmp/t
  image:
    name: ${BASE_IMAGE}:${BASE_IMAGE_LATEST}
    pull_policy: always
  script:
    - ./tools/prepare_source_tree_for_stat_analysis.py apply
    - pushd tarantool-sys/ && make -f .test.mk VARDIR=${VARDIR} test-release;
  artifacts:
    when: always
    paths:
      - tarantool-sys/test/${VARDIR}/artifacts/

test-mac-m1:
  extends: .test
  tags:
    - mac-dev-m1
  script:
    # Gitlab doesn't know $HOME inside `variables:`; if CARGO_HOME were set
    # there it would come out empty, so export it here instead.
    - export CARGO_HOME=$HOME/.cargo
    - cargo -V
    - cargo build --locked
    - cargo build --features webui --locked
    # There are no Rust tests for the `webui` feature;
    # it is covered by the integration tests instead.
    - cargo test --locked

    - cargo fmt -- -v --check
    - cargo clippy --version
    - cargo clippy --features "load_test webui error_injection" -- --deny clippy::all --no-deps
    # - *poetry-install
    # - poetry run pytest --numprocesses auto -v
    # - make lint

.helm:
  interruptible: true
  stage: test
  needs: []
  rules:
    - if: $CI_COMMIT_BRANCH != $MAIN_BRANCH
      changes:
        compare_to: *main-branch
        paths:
          - helm/picodata.Dockerfile
          - helm/picodata-distroless.Dockerfile
          - helm/docker-compose.yml
      variables:
        PUSH_DOCKER: "--no-push"
    - if: ($CI_COMMIT_BRANCH == $MAIN_BRANCH) && ($CI_PIPELINE_SOURCE == "schedule")
      when: on_success
      variables:
        PUSH_DOCKER: ""
    - when: manual
      allow_failure: true

helm-image:
  extends: .helm
  variables:
    KUBERNETES_CPU_REQUEST: 8
    KUBERNETES_MEMORY_REQUEST: "8Gi"
    MULTIARCH: "true"
  parallel:
    matrix:
      - DOCKERFILE: helm/picodata.Dockerfile
        DESTINATION: ${REGISTRY}/picodata:master
      - DOCKERFILE: helm/picodata-distroless.Dockerfile
        DESTINATION: ${REGISTRY}/picodata:master-distroless
  trigger:
    project: picodata/devops/picodata-in-docker
    branch: main
    strategy: depend

gamayun-prepare:
  extends: .test
  when: manual
  rules:
    - <<: *if-build-base-changes-on-master-branch
      variables:
        BASE_IMAGE_TAG: ${BASE_IMAGE_LATEST}
    - <<: *if-build-base-changes-on-dev-branch
      variables:
        BASE_IMAGE_TAG: ${CI_COMMIT_SHA}
    - <<: *else
      variables:
        BASE_IMAGE_TAG: ${BASE_IMAGE_LATEST}
  image:
    name: ${BASE_IMAGE}:${BASE_IMAGE_TAG}
    pull_policy: if-not-present
  script:
    - export CARGO_HOME=$HOME/.cargo
    - cargo -V
    - cargo clippy --locked --message-format=json > clippy.json
  artifacts:
    when: always
    paths:
      - clippy.json

gamayun-run:
  extends: .test
  needs: ["gamayun-prepare"]
  tags:
    - picodata-shell
  script:
    # create an ssh tunnel to the gamayun server so the report can be uploaded
    - TUNNEL="ssh -4 -L 9000:localhost:9000 sonar-reports -N -f"
    - eval $TUNNEL
    - ls -la
    - |
      # The svace patches take precedence over the gamayun ones, and the order
      # of application matters: first svace, then gamayun.
      ./tools/prepare_source_tree_for_stat_analysis.py apply
    - |
      # TODO: consider moving this into the script as well; it may delete some
      # HTML license files, which is probably not intended
      find . -type d -name 'test*' -o -name 'doc' -o -name 'docs' | xargs -n1 rm -rvf
      find tarantool-sys/third_party/ -type f -name '*.htm*' | xargs -n 1 rm -rfv
      find tarantool-sys/vendor/ -type f -name '*.htm*' | xargs -n 1 rm -rfv
      find . -type d -name stress-test | xargs -n 1 rm -rvf
      find . -type d -name integration-tests | xargs -n 1 rm -rvf
      find . -type d -name 'example*' | xargs -n 1 rm -rvf
      find . -type d -name 'docsrc' | xargs -n 1 rm -rvf
      find . -name '*.md' | xargs -n 1 rm -rvf
      find http tarantool-sys vshard -type d -name .github | xargs -n 1 rm -rfv
    - |
      docker run --rm -t \
        -v $PWD:/tmp/src:rw \
        -e "SONAR_OPS=-Dsonar.python.version=3 -Dsonar.login=${SONAR} -Dsonar.projectKey=Picodata-CI -Dsonar.exclusions=**/*.mod,osv-sonar.json" \
        -e "SONAR_SCANNER_OPTS="-Xmx4096m"" \
        -e "CARGO_CLIPPY_FILE=clippy.json" \
        -u $(id -u):$(id -g) --ulimit nofile=100000:100000 --network=host \
        docker.binary.picodata.io/gamayun

build-vm-image:
  interruptible: true
  stage: test
  when: manual
  inherit:
    variables: false
  variables:
    # Use CI_COMMIT_REF_NAME rather than CI_COMMIT_BRANCH, because the latter
    # is not set for tag pipelines
    BRANCH: $CI_COMMIT_REF_NAME
  trigger:
    project: picodata/picodata/picodata-fstek-vmbuilder
    branch: main
    strategy: depend

pack-on-tag:
  stage: pack
  rules:
    - if: $CI_PIPELINE_SOURCE == "web"
    - if: $CI_COMMIT_TAG
  variables:
    PROJECT_TARGET: "picodata"
    TYPE: "RELEASE"
    BRANCH_TARGET: $CI_COMMIT_TAG
  inherit:
    variables:
      - TYPE
      - PROJECT_TARGET
      - BRANCH_TARGET
  trigger:
    project: "picodata/devops/builder"
    strategy: depend

.deploy-docker-tmpl:
  stage: docker
  variables:
    PUSH_DOCKER: ""
    MULTIARCH: "true"
  rules:
    - if: $CI_COMMIT_TAG
      variables:
        PARENT_BRANCH: ${CI_COMMIT_TAG}

deploy-docker:
  extends: .deploy-docker-tmpl
  parallel:
    matrix:
      - DOCKERFILE: helm/picodata.Dockerfile
        DESTINATION: ${REGISTRY}/picodata:${CI_COMMIT_TAG}
      - DOCKERFILE: helm/picodata-distroless.Dockerfile
        DESTINATION: ${REGISTRY}/picodata:${CI_COMMIT_TAG}-distroless
      - DOCKERFILE: docker-build-base/sbroad.Dockerfile
        DESTINATION: ${BASE_IMAGE_NAME}:${CI_COMMIT_TAG}

  trigger:
    project: picodata/devops/picodata-in-docker
    branch: main
    strategy: depend

docker-compose:
  stage: docker
  needs: ["helm-image: [helm/picodata.Dockerfile, ${REGISTRY}/picodata:master]"]
  tags:
    - picodata-shell
  rules:
    - if: ($CI_COMMIT_BRANCH != $MAIN_BRANCH) && ($CI_PIPELINE_SOURCE == "schedule")
      changes:
        compare_to: *main-branch
        paths:
          - helm/docker-compose.yml
    - if: ($CI_COMMIT_BRANCH == $MAIN_BRANCH) && ($CI_PIPELINE_SOURCE == "schedule")
      when: on_success
    - when: manual
      allow_failure: true
  script:
    - mkdir --mode=777 pico/
    - docker-compose -f helm/docker-compose.yml up -d --wait
    - sleep 30 # the cluster needs some time to assemble
    - |
      count=$(psql "user=admin host=127.0.0.1 port=55432 password=T0psecret sslmode=disable" \
        -t -A -c 'select count(*) from "_pico_instance"')
      # a fully assembled cluster is expected to have exactly 4 instances
      if [[ $count -eq 4 ]]; then
        echo "OK"
      else
        echo count=$count
        exit 2
      fi
  after_script:
    - docker-compose -f helm/docker-compose.yml rm -fsv
    - sudo rm -rf pico/

# Stages for independent stress testing in a downstream pipeline.
# We cannot pass artifacts to the downstream pipeline, so we build and upload
# them to our repository instead.
.upload-picodata-to-binary:
  image:
    name: ${BASE_IMAGE}:${BASE_IMAGE_LATEST}
  variables:
    VER: $CI_COMMIT_SHORT_SHA
    GIT_SUBMODULE_STRATEGY: recursive
    GIT_DESCRIBE: $PICODATA_DESCRIBE
  before_script:
    # To conduct stress tests, we require the most recent picodata tag. However,
    # creating a shallow copy with $GIT_DEPTH commits might not include this tag.
    # Fetching all commits takes too much time, so we incrementally download more
    # chunks of commits until we find the required tag.
    - *fetch-tags
  script:
    - cargo build --locked --release --features webui
    - mv target/release/picodata target/release/picodata-$VER
    - curl -v --upload-file target/release/picodata-$VER $RAW_NT_REGISTRY

upload-picodata-to-binary-stress-test:
  stage: stress-test
  extends: .upload-picodata-to-binary
  rules:
    - if: $CI_COMMIT_BRANCH == $MAIN_BRANCH
      when: always
    - if: $CI_COMMIT_BRANCH != $MAIN_BRANCH
      when: manual
      allow_failure: true

downstream-stress-test:
  interruptible: true
  # See https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html
  stage: stress-test
  rules:
    - if: $CI_COMMIT_BRANCH == $MAIN_BRANCH
      when: always
    - if: $CI_COMMIT_BRANCH != $MAIN_BRANCH
      when: manual
      allow_failure: true
  trigger:
    project: picodata/devops/proxmox/sbroad-nt
    branch: main
    strategy: depend
  variables:
    VER: $CI_COMMIT_SHORT_SHA
  needs:
    - job: upload-picodata-to-binary-stress-test

upload-picodata-to-binary-front-deploy:
  stage: deploy
  extends: .upload-picodata-to-binary
  needs: []
  when: manual

downstream-front-deploy:
  interruptible: true
  # See https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html
  stage: deploy
  allow_failure: true
  trigger:
    project: picodata/devops/pico-servers/front
    branch: main
    strategy: depend
  variables:
    VER: $CI_COMMIT_SHORT_SHA
  needs:
    - job: upload-picodata-to-binary-front-deploy

publish-picodata-plugin:
  stage: pack
  when: manual
  rules:
    - <<: *if-build-base-changes-on-master-branch
      variables:
        BASE_IMAGE_TAG: ${BASE_IMAGE_LATEST}
    - if: $CI_COMMIT_BRANCH == $MAIN_BRANCH
      variables:
        BASE_IMAGE_TAG: ${BASE_IMAGE_LATEST}
    - <<: *if-build-base-changes-on-dev-branch
      variables:
        BASE_IMAGE_TAG: ${CI_COMMIT_SHA}
    - <<: *else
      variables:
        BASE_IMAGE_TAG: ${BASE_IMAGE_LATEST}
  image:
    name: ${BASE_IMAGE}:${BASE_IMAGE_TAG}
    pull_policy: if-not-present
  script:
    - if [ -z "$CARGO_REGISTRY_TOKEN" ]; then echo "Variable CARGO_TOKEN must be available, check that branch is protected" 1>&2 && exit 1; fi
    - make publish-picodata-plugin


.kaniko_image: &kaniko_image
  image:
    name: docker-public.binary.picodata.io/kaniko-project/executor:v1.23.1-debug
    entrypoint: ['']
    pull_policy: [if-not-present]
  rules:
    - if: $CI_PIPELINE_SOURCE != "schedule"
    - when: manual
      allow_failure: true
  tags:
    - docker-k8s
  before_script:
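    # kaniko reads registry credentials from /kaniko/.docker/config.json;
    # $DOCKER_AUTH_CONFIG is expected to hold the docker `auths` JSON.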
    - mkdir -p /kaniko/.docker
    - echo "$DOCKER_AUTH_CONFIG" > /kaniko/.docker/config.json

build-stress-image:
  stage: build-stress-image
  <<: *kaniko_image
  needs: []
  variables:
    GIT_USERNAME: $CI_REGISTRY_USER
    GIT_PASSWORD: $CI_REGISTRY_PASSWORD
    DOCKERFILE: docker-build-base/docker.stress
  script:
    - BASE_IMAGE_TAG=$(cd sbroad && git describe --tags --abbrev=0 2>/dev/null || echo "latest")
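    # The kaniko flags below favor speed over caching: layer caching is
    # disabled, a single filesystem snapshot is taken at the end, and `redo`
    # snapshot mode tracks changes via file metadata rather than full hashes.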
    - >
      /kaniko/executor --context $CI_PROJECT_DIR --dockerfile ${DOCKERFILE}
      --build-arg COMMIT_HASH=${CI_COMMIT_SHA} --build-arg BASE_IMAGE_TAG=${BASE_IMAGE_TAG} ${PUSH_DOCKER}
      --cache=false --cache-run-layers=true --single-snapshot --compressed-caching=false --use-new-run --snapshot-mode=redo --cleanup
      --destination $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA

.test-sbroad:
  stage: test
  tags:
    - docker-k8s
  rules:
    - if: $CI_PIPELINE_SOURCE != "schedule"
  image:
    name: $BASE_IMAGE_NAME:$BASE_IMAGE_TAG
    pull_policy: [always]
  needs: []
  before_script:
    - pushd sbroad

lint-sbroad:
  extends: [.test-sbroad]
  script:
    - make lint

unit-sbroad:
  extends: [.test-sbroad]
  script:
    - make test

integration-sbroad:
  extends: [.test-sbroad]
  variables:
    KUBERNETES_CPU_REQUEST: 6
    KUBERNETES_MEMORY_REQUEST: "6Gi"
  script:
    - make test_integration
  artifacts:
    paths:
      - sbroad-cartridge/test_app/tmp/tarantool.log
    expire_in: 1 week

bench-sbroad:
  extends: [.test-sbroad]
  script:
    - make bench_check

stress_tests:
  parallel:
    matrix:
      - STRESS_TEST:
        - projection
        - projection_wide
        - groupby
        - insert
  tags:
    - docker-k8s
  rules:
    - if: $CI_PIPELINE_SOURCE != "schedule"
  variables:
    TNT_HOST: tarantool
    STRESS_TEST: insert
    KUBERNETES_CPU_REQUEST: 6
    KUBERNETES_MEMORY_REQUEST: "6Gi"
  services:
    - name: $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA
      alias: tarantool
      pull_policy: always
  image: docker-public.binary.picodata.io/k6_tnt:latest
  stage: stress-test
  allow_failure: true
  script:
    - echo "STRESS_TEST=${STRESS_TEST}"
    # wait until the tnt cluster has started
    - ./tools/check_tnt.sh
    - mkdir -p sbroad/sbroad-cartridge/stress-test/$CI_COMMIT_BRANCH/${STRESS_TEST}/
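    # k6 flags: 10 virtual users (-u) for a 1 minute duration (-d)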
    - k6 run -u 10 -d 1m -e HOST=${TNT_HOST} sbroad/sbroad-cartridge/stress-test/${STRESS_TEST}/k6.js --summary-export sbroad/sbroad-cartridge/stress-test/$CI_COMMIT_BRANCH/${STRESS_TEST}/k6_summary.json
  needs: [build-stress-image]
  artifacts:
    paths:
      - sbroad/sbroad-cartridge/stress-test/$CI_COMMIT_BRANCH/${STRESS_TEST}/k6_summary.json
    expire_in: 1 hour
    when: always

store-stress-results-for-main:
  rules:
    - if: ($CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH) && ($CI_PIPELINE_SOURCE != "schedule")
  stage: stress-test
  image: $BASE_IMAGE_NAME:$BASE_IMAGE_TAG
  script:
    - |
      tar -czvf sbroad-main-test-results.tgz -C sbroad/sbroad-cartridge/stress-test/ .
      curl -f -H "Authorization: Basic $RAW_AUTH_RW" --upload-file sbroad-main-test-results.tgz $RAW_REGISTRY/sbroad-stress-tests/
  needs:
    - "stress_tests: [projection]"
    - "stress_tests: [projection_wide]"
    - "stress_tests: [groupby]"
    - "stress_tests: [insert]"

diff_stress-results:
  image: $BASE_IMAGE_NAME:$BASE_IMAGE_TAG
  stage: stress-test
  rules:
    - if: $CI_PIPELINE_SOURCE != "schedule"
      when: manual
      allow_failure: true
  variables:
    FARCH: "sbroad-main-test-results.tgz"
  script:
    - |
      curl $RAW_REGISTRY/sbroad-stress-tests/$FARCH -o $FARCH
      file $FARCH
      set -x
      if [ ! -f "${FARCH}" ] || ! file -i "${FARCH}" | grep -q gzip; then
        echo "Error: archive ${FARCH} not found in the binary registry"
        exit 1
      fi
      tar -xvf $FARCH -C sbroad/sbroad-cartridge/stress-test/
      cd sbroad/sbroad-cartridge/stress-test/
      find -name k6_summary.json
      for D in $(find . -name k6_summary.json -exec sh -c "dirname {} | sed 's/.*\///g'" \; ); do
        echo "Branch = $D"
        if [ "$D" != "main" ] && [ -f main/$D/k6_summary.json ] && [ -f $CI_COMMIT_BRANCH/$D/k6_summary.json ]; then
          # compare this branch's results against the stored main results
          tarantool compare.lua main/$D/k6_summary.json $CI_COMMIT_BRANCH/$D/k6_summary.json
        elif [ "$D" == "main" ]; then
          echo "skipped"
        else
          echo "Error: stress-test results not found!"
          exit 2
        fi
      done
  needs:
    - "stress_tests: [projection]"
    - "stress_tests: [projection_wide]"
    - "stress_tests: [groupby]"
    - "stress_tests: [insert]"
  artifacts:
    paths:
      - sbroad/sbroad-cartridge/stress-test/**/**/k6_summary.json
    expire_in: 1 week
    when: always

deploy-luarocks:
  stage: deploy
  rules:
    - if: $CI_COMMIT_TAG
  before_script:
    # Check the key before using it, otherwise ssh-add fails with a less
    # helpful error.
    - |
      if [ "$DEPLOY_PROD_SSH_KEY" == "" ]; then
        echo "Error: DEPLOY_PROD_SSH_KEY is not set; deploying is only possible from a protected tag or branch!"
        exit 1
      fi
    - eval $(ssh-agent -s)
    - echo "$DEPLOY_PROD_SSH_KEY" | base64 -d | ssh-add -
  script:
    - make release_rock
    - echo "Deploying luarocks..."
    - scp -o StrictHostKeyChecking=no sbroad*rock luarocks@94.26.239.246:/data/nginx/www/packrepo/luarocks
    - ssh -o StrictHostKeyChecking=no luarocks@94.26.239.246 "luarocks-admin make_manifest /data/nginx/www/packrepo/luarocks"