diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 8d774885215..fa12f2f70e1 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,14 +1,15 @@ * @centreon/owners-cpp -.github/** @centreon/owners-pipelines -packaging/** @centreon/owners-pipelines -selinux/** @centreon/owners-pipelines - -tests/** @centreon/owners-robot-e2e +*.pm @centreon/owners-perl +*.pl @centreon/owners-perl gorgone/ @centreon/owners-perl gorgone/docs/ @centreon/owners-doc +.github/** @centreon/owners-pipelines +**/packaging/** @centreon/owners-pipelines +**/selinux/** @centreon/owners-pipelines + +tests/** @centreon/owners-robot-e2e + gorgone/tests/robot/config/ @centreon/owners-perl -*.pm @centreon/owners-perl -*.pl @centreon/owners-perl diff --git a/.github/actions/deb-delivery/action.yml b/.github/actions/deb-delivery/action.yml index 46b6c5ec189..1c6a3850ba0 100644 --- a/.github/actions/deb-delivery/action.yml +++ b/.github/actions/deb-delivery/action.yml @@ -22,7 +22,7 @@ inputs: release_type: description: "Type of release (hotfix, release)" required: true - release_cloud: + is_cloud: description: "Release context (cloud or not cloud)" required: true @@ -49,12 +49,12 @@ runs: echo "[DEBUG] - Version: ${{ inputs.version }}" echo "[DEBUG] - Distrib: ${{ inputs.distrib }}" echo "[DEBUG] - module_name: ${{ inputs.module_name }}" - echo "[DEBUG] - release_cloud: ${{ inputs.release_cloud }}" + echo "[DEBUG] - is_cloud: ${{ inputs.is_cloud }}" echo "[DEBUG] - release_type: ${{ inputs.release_type }}" echo "[DEBUG] - stability: ${{ inputs.stability }}" # Make sure all required inputs are NOT empty - if [[ -z "${{ inputs.module_name }}" || -z "${{ inputs.distrib }}" || -z ${{ inputs.stability }} || -z ${{ inputs.version }} || -z ${{ inputs.release_cloud }} || -z ${{ inputs.release_type }} ]]; then + if [[ -z "${{ inputs.module_name }}" || -z "${{ inputs.distrib }}" || -z ${{ inputs.stability }} || -z ${{ inputs.version }} || -z "${{ inputs.is_cloud }}" ]]; then echo "Some mandatory inputs are empty, please check the logs." exit 1 fi diff --git a/.github/actions/delivery/action.yml b/.github/actions/delivery/action.yml index 8cbca5c8073..663b1f35549 100644 --- a/.github/actions/delivery/action.yml +++ b/.github/actions/delivery/action.yml @@ -22,7 +22,7 @@ inputs: release_type: description: "Type of release (hotfix, release)" required: true - release_cloud: + is_cloud: description: "Release context (cloud or not cloud)" required: true @@ -63,12 +63,12 @@ runs: echo "[DEBUG] - Major version: ${{ inputs.major_version }}" echo "[DEBUG] - Distrib: ${{ inputs.distrib }}" echo "[DEBUG] - module_name: ${{ inputs.module_name }}" - echo "[DEBUG] - release_cloud: ${{ inputs.release_cloud }}" + echo "[DEBUG] - is_cloud: ${{ inputs.is_cloud }}" echo "[DEBUG] - release_type: ${{ inputs.release_type }}" echo "[DEBUG] - stability: ${{ inputs.stability }}" # Make sure all required inputs are NOT empty - if [[ -z "${{ inputs.module_name }}" || -z "${{ inputs.distrib }}" || -z ${{ inputs.stability }} || -z ${{ inputs.major_version }} || -z ${{ inputs.release_cloud }} || -z ${{ inputs.release_type }} ]]; then + if [[ -z "${{ inputs.module_name }}" || -z "${{ inputs.distrib }}" || -z ${{ inputs.stability }} || -z ${{ inputs.major_version }} || -z "${{ inputs.is_cloud }}" ]]; then echo "Some mandatory inputs are empty, please check the logs." 
exit 1 fi @@ -87,32 +87,32 @@ runs: mv "$FILE" "$ARCH" done - # Build upload target path based on release_cloud and release_type values + # Build upload target path based on is_cloud and release_type values # if cloud + hotfix or cloud + release, deliver to internal testing- # if cloud + develop, delivery to internal unstable # if non-cloud, delivery to onprem testing or unstable # CLOUD + HOTFIX + REPO STANDARD INTERNAL OR CLOUD + RELEASE + REPO STANDARD INTERNAL - if [[ ${{ inputs.release_cloud }} -eq 1 && ( ${{ inputs.release_type }} == "hotfix" || ${{ inputs.release_type }} == "release" ) ]]; then + if [[ "${{ inputs.is_cloud }}" == "true" && ( "${{ inputs.release_type }}" == "hotfix" || "${{ inputs.release_type }}" == "release" ) ]]; then echo "[DEBUG] : Release cloud + ${{ inputs.release_type }}, using rpm-standard-internal." ROOT_REPO_PATHS="rpm-standard-internal" UPLOAD_REPO_PATH="${{ inputs.major_version }}/${{ inputs.distrib }}/${{ inputs.stability }}-${{ inputs.release_type }}/$ARCH/${{ inputs.module_name }}/" # CLOUD + NOT HOTFIX OR CLOUD + NOT RELEASE + REPO STANDARD INTERNAL - elif [[ ${{ inputs.release_cloud }} -eq 1 && ( ${{ inputs.release_type }} != "hotfix" && ${{ inputs.release_type }} != "release" ) ]]; then + elif [[ "${{ inputs.is_cloud }}" == "true" && ( "${{ inputs.release_type }}" != "hotfix" && "${{ inputs.release_type }}" != "release" ) ]]; then echo "[DEBUG] : Release cloud + NOT ${{ inputs.release_type }}, using rpm-standard-internal." ROOT_REPO_PATHS="rpm-standard-internal" UPLOAD_REPO_PATH="${{ inputs.major_version }}/${{ inputs.distrib }}/${{ inputs.stability }}-${{ inputs.release_type }}/$ARCH/${{ inputs.module_name }}/" # NON-CLOUD + (HOTFIX OR RELEASE) + REPO STANDARD - elif [[ ${{ inputs.release_cloud }} -eq 0 ]]; then + elif [[ "${{ inputs.is_cloud }}" == "false" ]]; then echo "[DEBUG] : NOT Release cloud + ${{ inputs.release_type }}, using rpm-standard." ROOT_REPO_PATHS="rpm-standard" UPLOAD_REPO_PATH="${{ inputs.major_version }}/${{ inputs.distrib }}/${{ inputs.stability }}/$ARCH/${{ inputs.module_name }}/" # NOT VALID, DO NOT DELIVER else - echo "::error:: Invalid combination of release_type [${{ inputs.release_type }}] and release_cloud [${{ inputs.release_cloud }}]" + echo "::error:: Invalid combination of release_type [${{ inputs.release_type }}] and is_cloud [${{ inputs.is_cloud }}]" exit 1 fi @@ -141,12 +141,12 @@ runs: echo "[DEBUG] - Major version: ${{ inputs.major_version }}" echo "[DEBUG] - Distrib: ${{ inputs.distrib }}" echo "[DEBUG] - module_name: ${{ inputs.module_name }}" - echo "[DEBUG] - release_cloud: ${{ inputs.release_cloud }}" + echo "[DEBUG] - is_cloud: ${{ inputs.is_cloud }}" echo "[DEBUG] - release_type: ${{ inputs.release_type }}" echo "[DEBUG] - stability: ${{ inputs.stability }}" # Make sure all required inputs are NOT empty - if [[ -z "${{ inputs.module_name }}" || -z "${{ inputs.distrib }}" || -z ${{ inputs.stability }} || -z ${{ inputs.major_version }} || -z ${{ inputs.release_cloud }} || -z ${{ inputs.release_type }} ]]; then + if [[ -z "${{ inputs.module_name }}" || -z "${{ inputs.distrib }}" || -z ${{ inputs.stability }} || -z ${{ inputs.major_version }} || -z "${{ inputs.is_cloud }}" ]]; then echo "Some mandatory inputs are empty, please check the logs." 
exit 1 fi diff --git a/.github/actions/package/action.yml b/.github/actions/package/action.yml index 950b9cb8e27..f328ac6bab8 100644 --- a/.github/actions/package/action.yml +++ b/.github/actions/package/action.yml @@ -45,6 +45,12 @@ runs: using: composite steps: + - name: Parse distrib name + id: parse-distrib + uses: ./.github/actions/parse-distrib + with: + distrib: ${{ inputs.distrib }} + - name: Import gpg key env: RPM_GPG_SIGNING_KEY: ${{ inputs.rpm_gpg_key }} @@ -62,13 +68,13 @@ runs: export ARCH="${{ inputs.arch }}" if [ "${{ inputs.package_extension }}" = "rpm" ]; then - export DIST=".${{ inputs.distrib }}" + export DIST="${{ steps.parse-distrib.outputs.package_distrib_separator }}${{ steps.parse-distrib.outputs.package_distrib_name }}" else export DIST="" if [ "${{ inputs.stability }}" = "unstable" ] || [ "${{ inputs.stability }}" = "canary" ]; then - export RELEASE="$RELEASE~${{ inputs.distrib }}" + export RELEASE="$RELEASE${{ steps.parse-distrib.outputs.package_distrib_separator }}${{ steps.parse-distrib.outputs.package_distrib_name }}" else - export RELEASE="1~${{ inputs.distrib }}" + export RELEASE="1${{ steps.parse-distrib.outputs.package_distrib_separator }}${{ steps.parse-distrib.outputs.package_distrib_name }}" fi fi diff --git a/.github/actions/parse-distrib/action.yml b/.github/actions/parse-distrib/action.yml new file mode 100644 index 00000000000..39888187619 --- /dev/null +++ b/.github/actions/parse-distrib/action.yml @@ -0,0 +1,45 @@ +name: "parse-distrib" +description: "parse distrib name." +inputs: + distrib: + description: "The distribution name" + required: true +outputs: + package_distrib_separator: + description: "Separator between package version and distrib number" + value: ${{ steps.parse-distrib.outputs.package_distrib_separator }} + package_distrib_name: + description: "Distribution suffix in package name" + value: ${{ steps.parse-distrib.outputs.package_distrib_name }} + +runs: + using: "composite" + steps: + - name: Parse distrib + id: parse-distrib + run: | + if [[ "${{ inputs.distrib }}" == "centos7" || "${{ inputs.distrib }}" == "el7" ]]; then + PACKAGE_DISTRIB_SEPARATOR="." + PACKAGE_DISTRIB_NAME="el7" + elif [[ "${{ inputs.distrib }}" == "alma8" || "${{ inputs.distrib }}" == "el8" ]]; then + PACKAGE_DISTRIB_SEPARATOR="." + PACKAGE_DISTRIB_NAME="el8" + elif [[ "${{ inputs.distrib }}" == "alma9" || "${{ inputs.distrib }}" == "el9" ]]; then + PACKAGE_DISTRIB_SEPARATOR="." + PACKAGE_DISTRIB_NAME="el9" + elif [[ "${{ inputs.distrib }}" == "bullseye" ]]; then + PACKAGE_DISTRIB_SEPARATOR="+" + PACKAGE_DISTRIB_NAME="deb11u1" + elif [[ "${{ inputs.distrib }}" == "bookworm" ]]; then + PACKAGE_DISTRIB_SEPARATOR="+" + PACKAGE_DISTRIB_NAME="deb12u1" + elif [[ "${{ inputs.distrib }}" == "jammy" ]]; then + PACKAGE_DISTRIB_SEPARATOR="-" + PACKAGE_DISTRIB_NAME="0ubuntu.22.04" + else + echo "::error::Distrib ${{ inputs.distrib }} cannot be parsed" + exit 1 + fi + echo "package_distrib_separator=$PACKAGE_DISTRIB_SEPARATOR" >> $GITHUB_OUTPUT + echo "package_distrib_name=$PACKAGE_DISTRIB_NAME" >> $GITHUB_OUTPUT + shell: bash
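Note: the new parse-distrib action maps a distribution name to the separator and suffix that the package and promote actions splice into package version strings. A minimal standalone bash sketch of that mapping, for trying it outside the workflow (the parse_distrib function name is ours, purely illustrative):

    #!/usr/bin/env bash
    # Hypothetical standalone equivalent of .github/actions/parse-distrib.
    parse_distrib() {
      case "$1" in
        centos7|el7)  echo ". el7" ;;
        alma8|el8)    echo ". el8" ;;
        alma9|el9)    echo ". el9" ;;
        bullseye)     echo "+ deb11u1" ;;
        bookworm)     echo "+ deb12u1" ;;
        jammy)        echo "- 0ubuntu.22.04" ;;
        *)            echo "::error::Distrib $1 cannot be parsed" >&2; return 1 ;;
      esac
    }
    # Example: a DEB RELEASE of "1" on bookworm becomes "1+deb12u1",
    # matching the RELEASE="1${separator}${name}" line in package/action.yml.
    read -r SEP NAME < <(parse_distrib "bookworm")
    echo "1${SEP}${NAME}"   # -> 1+deb12u1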
diff --git a/.github/actions/promote-to-stable/action.yml b/.github/actions/promote-to-stable/action.yml index 4432aee5663..32523490e65 100644 --- a/.github/actions/promote-to-stable/action.yml +++ b/.github/actions/promote-to-stable/action.yml @@ -22,7 +22,7 @@ inputs: release_type: description: "Type of release (hotfix, release)" required: true - release_cloud: + is_cloud: description: "Release context (cloud or not cloud)" required: true @@ -34,6 +34,12 @@ runs: JF_URL: https://centreon.jfrog.io JF_ACCESS_TOKEN: ${{ inputs.artifactory_token }} + - name: Parse distrib name + id: parse-distrib + uses: ./.github/actions/parse-distrib + with: + distrib: ${{ inputs.distrib }} + - name: Promote RPM packages to stable if: ${{ startsWith(inputs.distrib, 'el') }} run: | @@ -42,7 +48,7 @@ # DEBUG echo "[DEBUG] - Major version: ${{ inputs.major_version }}" echo "[DEBUG] - Distrib: ${{ inputs.distrib }}" - echo "[DEBUG] - release_cloud: ${{ inputs.release_cloud }}" + echo "[DEBUG] - is_cloud: ${{ inputs.is_cloud }}" echo "[DEBUG] - release_type: ${{ inputs.release_type }}" # Cloud specific promote @@ -56,15 +62,15 @@ # Search for testing packages candidate for promote for ARCH in "noarch" "x86_64"; do - # Build search path based on release_cloud and release_type values + # Build search path based on is_cloud and release_type values # if cloud, search in testing- path # if non-cloud, search in the testing usual path - if [[ ${{ inputs.release_cloud }} -eq 1 && ${{ inputs.release_type }} == "hotfix" ]] || [[ ${{ inputs.release_cloud }} -eq 1 && ${{ inputs.release_type }} == "release" ]]; then + if [[ "${{ inputs.is_cloud }}" == "true" && "${{ inputs.release_type }}" == "hotfix" ]] || [[ "${{ inputs.is_cloud }}" == "true" && "${{ inputs.release_type }}" == "release" ]]; then SEARCH_REPO_PATH="${{ inputs.major_version }}/${{ inputs.distrib }}/testing-${{ inputs.release_type }}/$ARCH/${{ inputs.module_name }}" - elif [[ ${{ inputs.release_cloud }} -eq 0 ]]; then + elif [[ "${{ inputs.is_cloud }}" == "false" ]]; then SEARCH_REPO_PATH="${{ inputs.major_version }}/${{ inputs.distrib }}/testing/$ARCH/${{ inputs.module_name }}" else - echo "Invalid combination of release_type and release_cloud" + echo "Invalid combination of release_type and is_cloud" fi echo "[DEBUG] - Get path of $ARCH testing artifacts to promote to stable." @@ -81,7 +87,7 @@ # Build target path based on ARCH echo "[DEBUG] - Build $ARCH target path."
- TARGET_PATH="$ROOT_REPO_PATH/${{ inputs.major_version }}/${{ inputs.distrib }}/${{ inputs.stability }}/$ARCH/RPMS/${{ inputs.module_name }}/" + TARGET_PATH="$ROOT_REPO_PATH/${{ inputs.major_version }}/${{ inputs.distrib }}/stable/$ARCH/RPMS/${{ inputs.module_name }}/" echo "[DEBUG] - Target path: $TARGET_PATH" # Download candidates for promote @@ -91,10 +97,15 @@ jf rt download $ARTIFACT --flat done + DRY_RUN_FLAG="--dry-run" + if [ "${{ inputs.stability }}" == "stable" ]; then + DRY_RUN_FLAG="" + fi + # Upload previously downloaded candidates to TARGET_PATH for ARTIFACT_DL in $(dir|grep -E "*.rpm"); do echo "[DEBUG] - Promoting (upload) $ARTIFACT_DL to stable $TARGET_PATH." - jf rt upload "$ARTIFACT_DL" "$TARGET_PATH" --flat + jf rt upload "$ARTIFACT_DL" "$TARGET_PATH" --flat $DRY_RUN_FLAG done # Cleanup before next round of candidates @@ -123,7 +134,7 @@ SRC_PATHS=$(jf rt search --include-dirs $ROOT_REPO_PATH-testing/pool/${{ inputs.module_name }}/*.deb | jq -r '.[].path') ;; *) - SRC_PATHS=$(jf rt search --include-dirs $ROOT_REPO_PATH-testing/pool/${{ inputs.module_name }}/*${{ inputs.distrib }}*.deb | jq -r '.[].path') + SRC_PATHS=$(jf rt search --include-dirs $ROOT_REPO_PATH-testing/pool/${{ inputs.module_name }}/*${{ steps.parse-distrib.outputs.package_distrib_name }}*.deb | jq -r '.[].path') ;; esac @@ -132,12 +143,12 @@ echo "[DEBUG] - Source path found: $SRC_PATH" done else - echo "[DEBUG] - No source path found." - continue + echo "::warning::No source path found." + exit 0 fi echo "[DEBUG] - Build target path." - TARGET_PATH="$ROOT_REPO_PATH-${{ inputs.stability }}/pool/${{ inputs.module_name }}/" + TARGET_PATH="$ROOT_REPO_PATH-stable/pool/${{ inputs.module_name }}/" echo "[DEBUG] - Target path: $TARGET_PATH" echo "[DEBUG] - Promoting DEB testing artifacts to stable." @@ -151,16 +162,20 @@ ARTIFACT_SEARCH_PATTERN=".+\.deb" ;; *) - ARTIFACT_SEARCH_PATTERN=".+${{ inputs.distrib }}.+\.deb" + ARTIFACT_SEARCH_PATTERN=".+${{ steps.parse-distrib.outputs.package_distrib_name }}.+\.deb" ;; esac + DRY_RUN_FLAG="--dry-run" + if [ "${{ inputs.stability }}" == "stable" ]; then + DRY_RUN_FLAG="" + fi + for ARTIFACT_DL in $(dir -1|grep -E $ARTIFACT_SEARCH_PATTERN); do ARCH=$(echo $ARTIFACT_DL | cut -d '_' -f3 | cut -d '.' -f1) echo "[DEBUG] - Promoting (upload) $ARTIFACT_DL to stable $TARGET_PATH." - jf rt upload "$ARTIFACT_DL" "$TARGET_PATH" --deb "${{ inputs.distrib }}/main/$ARCH" --flat + jf rt upload "$ARTIFACT_DL" "$TARGET_PATH" --deb "${{ inputs.distrib }}/main/$ARCH" --flat $DRY_RUN_FLAG done rm -f *.deb - shell: bash
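Note on the DRY_RUN_FLAG blocks added above: the promote action now performs a real upload only when stability is exactly "stable"; every other run walks the same code path but asks the JFrog CLI to simulate the transfer. A minimal sketch of the pattern (the path and values below are illustrative, not taken from a real run):

    #!/usr/bin/env bash
    # Default to a server-side simulation; drop the flag only for stable runs.
    STABILITY="testing"            # stands in for ${{ inputs.stability }}
    DRY_RUN_FLAG="--dry-run"
    if [ "$STABILITY" == "stable" ]; then
      DRY_RUN_FLAG=""
    fi
    # $DRY_RUN_FLAG is left unquoted on purpose: when empty it expands to
    # nothing instead of passing an empty argument to the JFrog CLI.
    jf rt upload "example.rpm" "rpm-standard/24.04/el9/stable/x86_64/RPMS/collect/" --flat $DRY_RUN_FLAG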
diff --git a/.github/actions/rpm-delivery/action.yml b/.github/actions/rpm-delivery/action.yml index 3174c753300..b1fbc79e2d7 100644 --- a/.github/actions/rpm-delivery/action.yml +++ b/.github/actions/rpm-delivery/action.yml @@ -22,7 +22,7 @@ inputs: release_type: description: "Type of release (hotfix, release)" required: true - release_cloud: + is_cloud: description: "Release context (cloud or not cloud)" required: true @@ -61,12 +61,12 @@ runs: echo "[DEBUG] - Version: ${{ inputs.version }}" echo "[DEBUG] - Distrib: ${{ inputs.distrib }}" echo "[DEBUG] - module_name: ${{ inputs.module_name }}" - echo "[DEBUG] - release_cloud: ${{ inputs.release_cloud }}" + echo "[DEBUG] - is_cloud: ${{ inputs.is_cloud }}" echo "[DEBUG] - release_type: ${{ inputs.release_type }}" echo "[DEBUG] - stability: ${{ inputs.stability }}" # Make sure all required inputs are NOT empty - if [[ -z "${{ inputs.module_name }}" || -z "${{ inputs.distrib }}" || -z ${{ inputs.stability }} || -z ${{ inputs.version }} || -z ${{ inputs.release_cloud }} || -z ${{ inputs.release_type }} ]]; then + if [[ -z "${{ inputs.module_name }}" || -z "${{ inputs.distrib }}" || -z ${{ inputs.stability }} || -z ${{ inputs.version }} || -z "${{ inputs.is_cloud }}" ]]; then echo "Some mandatory inputs are empty, please check the logs." exit 1 fi @@ -85,32 +85,32 @@ runs: mv "$FILE" "$ARCH" done - # Build upload target path based on release_cloud and release_type values + # Build upload target path based on is_cloud and release_type values # if cloud + hotfix or cloud + release, deliver to internal testing- # if cloud + develop, delivery to internal unstable # if non-cloud, delivery to onprem testing or unstable # CLOUD + HOTFIX + REPO STANDARD INTERNAL OR CLOUD + RELEASE + REPO STANDARD INTERNAL - if [[ ${{ inputs.release_cloud }} -eq 1 ]] && ([[ ${{ inputs.release_type }} == "hotfix" ]] || [[ ${{ inputs.release_type }} == "release" ]]); then + if [[ "${{ inputs.is_cloud }}" == "true" ]] && ([[ "${{ inputs.release_type }}" == "hotfix" ]] || [[ "${{ inputs.release_type }}" == "release" ]]); then echo "[DEBUG] : Release cloud + ${{ inputs.release_type }}, using rpm-standard-internal." ROOT_REPO_PATHS="rpm-standard-internal" UPLOAD_REPO_PATH="${{ inputs.version }}/${{ inputs.distrib }}/${{ inputs.stability }}-${{ inputs.release_type }}/$ARCH/${{ inputs.module_name }}/" # CLOUD + NOT HOTFIX OR CLOUD + NOT RELEASE + REPO STANDARD INTERNAL - elif [[ ${{ inputs.release_cloud }} -eq 1 ]] && ([[ ${{ inputs.release_type }} != "hotfix" ]] || [[ ${{ inputs.release_type }} != "release" ]]); then + elif [[ "${{ inputs.is_cloud }}" == "true" ]] && ([[ "${{ inputs.release_type }}" != "hotfix" ]] && [[ "${{ inputs.release_type }}" != "release" ]]); then echo "[DEBUG] : Release cloud + NOT ${{ inputs.release_type }}, using rpm-standard-internal." ROOT_REPO_PATHS="rpm-standard-internal" UPLOAD_REPO_PATH="${{ inputs.version }}/${{ inputs.distrib }}/${{ inputs.stability }}-${{ inputs.release_type }}/$ARCH/${{ inputs.module_name }}/" # NON-CLOUD + (HOTFIX OR RELEASE) + REPO STANDARD - elif [[ ${{ inputs.release_cloud }} -eq 0 ]]; then + elif [[ "${{ inputs.is_cloud }}" == "false" ]]; then echo "[DEBUG] : NOT Release cloud + ${{ inputs.release_type }}, using rpm-standard." ROOT_REPO_PATHS="rpm-standard" UPLOAD_REPO_PATH="${{ inputs.version }}/${{ inputs.distrib }}/${{ inputs.stability }}/$ARCH/${{ inputs.module_name }}/" # ANYTHING ELSE else - echo "::error:: Invalid combination of release_type [${{ inputs.release_type }}] and release_cloud [${{ inputs.release_cloud }}]" + echo "::error:: Invalid combination of release_type [${{ inputs.release_type }}] and is_cloud [${{ inputs.is_cloud }}]" exit 1 fi
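Across these actions the release_cloud input (0/1, tested with -eq) is replaced by is_cloud ("true"/"false", compared as a quoted string). Composite-action inputs always arrive as strings, so the quoted string comparison is the safer form and also keeps [[ ]] well-formed when the input is empty. A condensed, runnable sketch of the resulting routing, with illustrative stand-in values:

    #!/usr/bin/env bash
    IS_CLOUD="true"        # stands in for ${{ inputs.is_cloud }}
    RELEASE_TYPE="hotfix"  # stands in for ${{ inputs.release_type }}
    if [[ "$IS_CLOUD" == "true" && ( "$RELEASE_TYPE" == "hotfix" || "$RELEASE_TYPE" == "release" ) ]]; then
      ROOT_REPO_PATHS="rpm-standard-internal"   # cloud hotfix/release -> internal testing-<type>
    elif [[ "$IS_CLOUD" == "true" ]]; then
      ROOT_REPO_PATHS="rpm-standard-internal"   # cloud develop -> internal unstable
    elif [[ "$IS_CLOUD" == "false" ]]; then
      ROOT_REPO_PATHS="rpm-standard"            # on-prem -> onprem testing or unstable
    else
      echo "::error:: is_cloud must be true or false, got [$IS_CLOUD]"
      exit 1
    fi
    echo "$ROOT_REPO_PATHS"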
diff --git a/.github/scripts/windows-agent-compile.ps1 b/.github/scripts/windows-agent-compile.ps1 index 02af28abb7e..b3246b59ed3 100644 --- a/.github/scripts/windows-agent-compile.ps1 +++ b/.github/scripts/windows-agent-compile.ps1 @@ -24,6 +24,14 @@ Write-Host $env:VCPKG_BINARY_SOURCES $current_dir = $pwd.ToString() +#install recent version of 7zip needed by some packages +Write-Host "install 7zip" + +#download 7zip +Invoke-WebRequest -Uri "https://www.7-zip.org/a/7z2408-x64.msi" -OutFile "7z2408-x64.msi" +#install 7zip +Start-Process 'msiexec.exe' -ArgumentList '/I "7z2408-x64.msi" /qn' -Wait + #get cache from s3 $files_to_hash = "vcpkg.json", "custom-triplets\x64-windows.cmake", "CMakeLists.txt", "CMakeListsWindows.txt" $files_content = Get-Content -Path $files_to_hash -Raw @@ -32,12 +40,13 @@ $writer = [System.IO.StreamWriter]::new($stringAsStream) $writer.write($files_content -join " ") $writer.Flush() $stringAsStream.Position = 0 +$vcpkg_release = "2024.10.21" $vcpkg_hash = Get-FileHash -InputStream $stringAsStream -Algorithm SHA256 | Select-Object Hash -$file_name = "windows-agent-vcpkg-dependencies-cache-" + $vcpkg_hash.Hash +$file_name = "windows-agent-vcpkg-dependencies-cache-" + $vcpkg_hash.Hash + "-" + $vcpkg_release $file_name_extension = "${file_name}.7z" #try to get compiled dependencies from s3 -Write-Host "try to download compiled dependencies from s3" +Write-Host "try to download compiled dependencies from s3: $file_name_extension" aws --quiet s3 cp s3://centreon-collect-robot-report/$file_name_extension $file_name_extension if ( $? -ne $true ) { #no => generate Write-Host "#######################################################################################################################" Write-Host "install vcpkg" - git clone --depth 1 -b 2024.07.12 https://github.com/microsoft/vcpkg.git + git clone --depth 1 -b $vcpkg_release https://github.com/microsoft/vcpkg.git cd vcpkg bootstrap-vcpkg.bat cd $current_dir @@ -57,22 +66,24 @@ if ( $? -ne $true ) { Write-Host "compile vcpkg dependencies" vcpkg install --vcpkg-root $env:VCPKG_ROOT --x-install-root build_windows\vcpkg_installed --x-manifest-root . --overlay-triplets custom-triplets --triplet x64-windows - Write-Host "Compress binary archive" - 7z a $file_name_extension build_windows\vcpkg_installed - Write-Host "Upload binary archive" - aws s3 cp $file_name_extension s3://centreon-collect-robot-report/$file_name_extension - Write-Host "create CMake files" + if ( $? -eq $true ) { + Write-Host "Compress binary archive" + 7z a $file_name_extension build_windows\vcpkg_installed + Write-Host "Upload binary archive" + aws s3 cp $file_name_extension s3://centreon-collect-robot-report/$file_name_extension + } } else { 7z x $file_name_extension Write-Host "Create cmake files from binary-cache downloaded without use vcpkg" } - +Write-Host "create CMake files" cmake -DCMAKE_BUILD_TYPE=Release -DWITH_TESTING=On -DWINDOWS=On -DBUILD_FROM_CACHE=On -S.
-DVCPKG_CRT_LINKAGE=dynamic -DBUILD_SHARED_LIBS=OFF -Bbuild_windows -Write-Host "build agent and tests" + +Write-Host "------------- build agent only ---------------" cmake --build build_windows --config Release diff --git a/.github/workflows/centreon-collect.yml b/.github/workflows/centreon-collect.yml index d12a78d31c7..780a9c2b39e 100644 --- a/.github/workflows/centreon-collect.yml +++ b/.github/workflows/centreon-collect.yml @@ -57,14 +57,14 @@ on: - "!**/test/**" jobs: - get-version: - uses: ./.github/workflows/get-version.yml + get-environment: + uses: ./.github/workflows/get-environment.yml with: version_file: CMakeLists.txt unit-test: - needs: [get-version] - if: ${{ ! contains(fromJson('["stable"]'), needs.get-version.outputs.stability) }} + needs: [get-environment] + if: ${{ ! contains(fromJson('["stable"]'), needs.get-environment.outputs.stability) }} runs-on: [self-hosted, collect] strategy: @@ -81,8 +81,8 @@ jobs: uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3.2.0 with: registry: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }} - username: ${{ secrets.DOCKER_REGISTRY_ID }} - password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + username: ${{ secrets.HARBOR_CENTREON_PULL_USERNAME }} + password: ${{ secrets.HARBOR_CENTREON_PULL_TOKEN }} - name: Test ${{ matrix.image }} uses: ./.github/actions/runner-docker @@ -90,25 +90,25 @@ jobs: registry_url: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }} script_name: /src/.github/scripts/collect-unit-tests image_name: centreon-collect-${{ matrix.image }} - image_version: ${{ needs.get-version.outputs.img_version }} + image_version: ${{ needs.get-environment.outputs.img_version }} package: - needs: [get-version] - if: ${{ ! contains(fromJson('["stable"]'), needs.get-version.outputs.stability) }} + needs: [get-environment] + if: ${{ ! 
contains(fromJson('["stable"]'), needs.get-environment.outputs.stability) }} uses: ./.github/workflows/package-collect.yml with: - major_version: ${{ needs.get-version.outputs.major_version }} - minor_version: ${{ needs.get-version.outputs.minor_version }} - img_version: ${{ needs.get-version.outputs.img_version }} - release: ${{ needs.get-version.outputs.release }} + major_version: ${{ needs.get-environment.outputs.major_version }} + minor_version: ${{ needs.get-environment.outputs.minor_version }} + img_version: ${{ needs.get-environment.outputs.img_version }} + release: ${{ needs.get-environment.outputs.release }} commit_hash: ${{ github.sha }} - stability: ${{ needs.get-version.outputs.stability }} + stability: ${{ needs.get-environment.outputs.stability }} secrets: inherit deliver-sources: runs-on: [self-hosted, common] - needs: [get-version, package] - if: ${{ contains(fromJson('["stable"]'), needs.get-version.outputs.stability) && github.event_name != 'workflow_dispatch' }} + needs: [get-environment, package] + if: ${{ contains(fromJson('["stable"]'), needs.get-environment.outputs.stability) && github.event_name != 'workflow_dispatch' }} steps: - name: Checkout sources @@ -122,14 +122,13 @@ jobs: bucket_directory: centreon-collect module_directory: centreon-collect module_name: centreon-collect - major_version: ${{ needs.get-version.outputs.major_version }} - minor_version: ${{ needs.get-version.outputs.minor_version }} + major_version: ${{ needs.get-environment.outputs.major_version }} + minor_version: ${{ needs.get-environment.outputs.minor_version }} token_download_centreon_com: ${{ secrets.TOKEN_DOWNLOAD_CENTREON_COM }} deliver-rpm: - if: ${{ contains(fromJson('["testing", "stable"]'), needs.get-version.outputs.stability) }} - needs: [get-version, package] - environment: ${{ needs.get-version.outputs.environment }} + if: ${{ contains(fromJson('["testing", "stable"]'), needs.get-environment.outputs.stability) }} + needs: [get-environment, package] runs-on: [self-hosted, common] strategy: matrix: @@ -150,17 +149,16 @@ jobs: with: module_name: collect distrib: ${{ matrix.distrib }} - major_version: ${{ needs.get-version.outputs.major_version }} + major_version: ${{ needs.get-environment.outputs.major_version }} artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} cache_key: ${{ github.run_id }}-${{ github.sha }}-rpm-centreon-collect-${{ matrix.distrib }}-${{ matrix.arch }}-${{ github.head_ref || github.ref_name }} - stability: ${{ needs.get-version.outputs.stability }} - release_type: ${{ needs.get-version.outputs.release_type }} - release_cloud: ${{ needs.get-version.outputs.release_cloud }} + stability: ${{ needs.get-environment.outputs.stability }} + release_type: ${{ needs.get-environment.outputs.release_type }} + is_cloud: ${{ needs.get-environment.outputs.is_cloud }} deliver-deb: - if: ${{ contains(fromJson('["testing", "stable"]'), needs.get-version.outputs.stability) }} - needs: [get-version, package] - environment: ${{ needs.get-version.outputs.environment }} + if: ${{ contains(fromJson('["testing", "stable"]'), needs.get-environment.outputs.stability) }} + needs: [get-environment, package] runs-on: [self-hosted, common] strategy: matrix: @@ -185,16 +183,20 @@ jobs: with: module_name: collect distrib: ${{ matrix.distrib }} - major_version: ${{ needs.get-version.outputs.major_version }} + major_version: ${{ needs.get-environment.outputs.major_version }} artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} cache_key: ${{ github.run_id }}-${{ github.sha 
}}-deb-centreon-collect-${{ matrix.distrib }}-${{ matrix.arch }}-${{ github.head_ref || github.ref_name }} - stability: ${{ needs.get-version.outputs.stability }} - release_type: ${{ needs.get-version.outputs.release_type }} - release_cloud: ${{ needs.get-version.outputs.release_cloud }} + stability: ${{ needs.get-environment.outputs.stability }} + release_type: ${{ needs.get-environment.outputs.release_type }} + is_cloud: ${{ needs.get-environment.outputs.is_cloud }} promote: - needs: [get-version] - if: ${{ contains(fromJson('["stable"]'), needs.get-version.outputs.stability) && github.event_name != 'workflow_dispatch' }} + needs: [get-environment, deliver-rpm, deliver-deb] + if: | + (contains(fromJson('["stable", "testing"]'), needs.get-environment.outputs.stability) && github.event_name != 'workflow_dispatch') && + ! cancelled() && + ! contains(needs.*.result, 'failure') && + ! contains(needs.*.result, 'cancelled') runs-on: [self-hosted, common] strategy: matrix: @@ -210,8 +212,8 @@ jobs: artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} module_name: collect distrib: ${{ matrix.distrib }} - major_version: ${{ needs.get-version.outputs.major_version }} - stability: ${{ needs.get-version.outputs.stability }} + major_version: ${{ needs.get-environment.outputs.major_version }} + stability: ${{ needs.get-environment.outputs.stability }} github_ref_name: ${{ github.ref_name }} - release_type: ${{ needs.get-version.outputs.release_type }} - release_cloud: ${{ needs.get-version.outputs.release_cloud }} + release_type: ${{ needs.get-environment.outputs.release_type }} + is_cloud: ${{ needs.get-environment.outputs.is_cloud }} diff --git a/.github/workflows/check-status.yml b/.github/workflows/check-status.yml index 36799865754..b56f2253b55 100644 --- a/.github/workflows/check-status.yml +++ b/.github/workflows/check-status.yml @@ -39,7 +39,7 @@ jobs: script: | await exec.exec("sleep 20s"); - for (let i = 0; i < 60; i++) { + for (let i = 0; i < 120; i++) { const failure = []; const cancelled = []; const pending = []; @@ -47,7 +47,7 @@ jobs: const result = await github.rest.checks.listSuitesForRef({ owner: context.repo.owner, repo: context.repo.repo, - ref: "${{ github.event.pull_request.head.sha }}" + ref: "${{ github.head_ref }}" }); result.data.check_suites.forEach(({ app: { slug }, conclusion, id}) => { if (slug === 'github-actions') { @@ -86,8 +86,10 @@ jobs: core.summary.addList(failedCheckRuns); core.summary.write() - core.setFailed(`${failure.length} workflow(s) failed`); - return; + if (failedCheckRuns.length > 0) { + core.setFailed(`${failedCheckRuns.length} job(s) failed`); + return; + } } if (pending.length === 1) { diff --git a/.github/workflows/docker-builder.yml b/.github/workflows/docker-builder.yml index 0e2e4189fb0..09a7c744558 100644 --- a/.github/workflows/docker-builder.yml +++ b/.github/workflows/docker-builder.yml @@ -17,13 +17,13 @@ on: - '.github/docker/Dockerfile.centreon-collect-*' jobs: - get-version: - uses: ./.github/workflows/get-version.yml + get-environment: + uses: ./.github/workflows/get-environment.yml with: version_file: CMakeLists.txt create-and-push-docker: - needs: [get-version] + needs: [get-environment] strategy: fail-fast: false @@ -32,59 +32,59 @@ jobs: - runner: collect dockerfile: centreon-collect-alma8 image: centreon-collect-alma8 - tag: ${{ needs.get-version.outputs.img_version }} + tag: ${{ needs.get-environment.outputs.img_version }} - runner: collect dockerfile: centreon-collect-alma9 image: centreon-collect-alma9 - tag: ${{ 
needs.get-version.outputs.img_version }} + tag: ${{ needs.get-environment.outputs.img_version }} - runner: collect dockerfile: centreon-collect-alma9-test image: centreon-collect-alma9-test - tag: ${{ needs.get-version.outputs.test_img_version }} + tag: ${{ needs.get-environment.outputs.test_img_version }} - runner: collect dockerfile: centreon-collect-mysql-alma9 image: centreon-collect-mysql-alma9 - tag: ${{ needs.get-version.outputs.img_version }} + tag: ${{ needs.get-environment.outputs.img_version }} - runner: collect dockerfile: centreon-collect-mysql-alma9-test image: centreon-collect-mysql-alma9-test - tag: ${{ needs.get-version.outputs.test_img_version }} + tag: ${{ needs.get-environment.outputs.test_img_version }} - runner: collect dockerfile: centreon-collect-debian-bullseye image: centreon-collect-debian-bullseye - tag: ${{ needs.get-version.outputs.img_version }} + tag: ${{ needs.get-environment.outputs.img_version }} - runner: collect dockerfile: centreon-collect-debian-bullseye-test image: centreon-collect-debian-bullseye-test - tag: ${{ needs.get-version.outputs.test_img_version }} + tag: ${{ needs.get-environment.outputs.test_img_version }} - runner: collect dockerfile: centreon-collect-debian-bookworm image: centreon-collect-debian-bookworm - tag: ${{ needs.get-version.outputs.img_version }} + tag: ${{ needs.get-environment.outputs.img_version }} - runner: collect dockerfile: centreon-collect-debian-bookworm-test image: centreon-collect-debian-bookworm-test - tag: ${{ needs.get-version.outputs.test_img_version }} + tag: ${{ needs.get-environment.outputs.test_img_version }} - runner: collect dockerfile: centreon-collect-ubuntu-jammy image: centreon-collect-ubuntu-jammy - tag: ${{ needs.get-version.outputs.img_version }} + tag: ${{ needs.get-environment.outputs.img_version }} - runner: collect-arm64 dockerfile: centreon-collect-debian-bullseye image: centreon-collect-debian-bullseye-arm64 - tag: ${{ needs.get-version.outputs.img_version }} + tag: ${{ needs.get-environment.outputs.img_version }} - runner: collect-arm64 dockerfile: centreon-collect-debian-bullseye-test image: centreon-collect-debian-bullseye-arm64-test - tag: ${{ needs.get-version.outputs.test_img_version }} + tag: ${{ needs.get-environment.outputs.test_img_version }} - runner: collect-arm64 dockerfile: centreon-collect-debian-bookworm image: centreon-collect-debian-bookworm-arm64 - tag: ${{ needs.get-version.outputs.img_version }} + tag: ${{ needs.get-environment.outputs.img_version }} - runner: collect-arm64 dockerfile: centreon-collect-debian-bookworm-test image: centreon-collect-debian-bookworm-arm64-test - tag: ${{ needs.get-version.outputs.test_img_version }} + tag: ${{ needs.get-environment.outputs.test_img_version }} runs-on: ${{ fromJson(format('["self-hosted", "{0}"]', matrix.runner)) }} @@ -98,15 +98,15 @@ jobs: uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3.2.0 with: registry: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }} - username: ${{ secrets.DOCKER_REGISTRY_ID }} - password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + username: ${{ secrets.HARBOR_CENTREON_PUSH_USERNAME }} + password: ${{ secrets.HARBOR_CENTREON_PUSH_TOKEN }} - name: Login to Proxy Registry uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3.2.0 with: registry: ${{ vars.DOCKER_PROXY_REGISTRY_URL }} - username: ${{ secrets.DOCKER_REGISTRY_ID }} - password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + username: ${{ secrets.HARBOR_CENTREON_PULL_USERNAME }} + password: ${{ 
secrets.HARBOR_CENTREON_PULL_TOKEN }} - uses: docker/setup-buildx-action@d70bba72b1f3fd22344832f00baa16ece964efeb # v3.3.0 diff --git a/.github/workflows/get-environment.yml b/.github/workflows/get-environment.yml new file mode 100644 index 00000000000..d88b4128337 --- /dev/null +++ b/.github/workflows/get-environment.yml @@ -0,0 +1,290 @@ +on: + workflow_call: + inputs: + version_file: + required: false + type: string + default: CMakeLists.txt + outputs: + latest_major_version: + description: "latest major version" + value: ${{ jobs.get-environment.outputs.latest_major_version }} + is_cloud: + description: "context of release (cloud or not cloud)" + value: ${{ jobs.get-environment.outputs.is_cloud }} + major_version: + description: "major version" + value: ${{ jobs.get-environment.outputs.major_version }} + minor_version: + description: "minor version" + value: ${{ jobs.get-environment.outputs.minor_version }} + release: + description: "release" + value: ${{ jobs.get-environment.outputs.release }} + stability: + description: "branch stability (stable, testing, unstable, canary)" + value: ${{ jobs.get-environment.outputs.stability }} + target_stability: + description: "Final target branch stability (stable, testing, unstable, canary or not defined if not a pull request)" + value: ${{ jobs.get-environment.outputs.target_stability }} + release_type: + description: "type of release (hotfix, release or not defined if not a release)" + value: ${{ jobs.get-environment.outputs.release_type }} + is_targeting_feature_branch: + description: "if it is a PR, check if targeting a feature branch" + value: ${{ jobs.get-environment.outputs.is_targeting_feature_branch }} + img_version: + description: "docker image version (vcpkg checksum)" + value: ${{ jobs.get-environment.outputs.img_version }} + test_img_version: + description: "test docker image version (checksum of database sql, script and dockerfiles)" + value: ${{ jobs.get-environment.outputs.test_img_version }} + gorgone_docker_version: + description: "md5 of gorgone dockerfile" + value: ${{ jobs.get-environment.outputs.gorgone_docker_version }} + +jobs: + get-environment: + runs-on: ubuntu-24.04 + outputs: + latest_major_version: ${{ steps.latest_major_version.outputs.latest_major_version }} + is_cloud: ${{ steps.detect_cloud_version.outputs.result }} + major_version: ${{ steps.get_version.outputs.major_version }} + minor_version: ${{ steps.get_version.outputs.minor_version }} + release: ${{ steps.get_release.outputs.release }} + stability: ${{ steps.get_stability.outputs.stability }} + target_stability: ${{ steps.get_stability.outputs.target_stability }} + release_type: ${{ steps.get_release_type.outputs.release_type }} + is_targeting_feature_branch: ${{ steps.get_stability.outputs.is_targeting_feature_branch }} + img_version: ${{ steps.get_docker_images_version.outputs.img_version }} + test_img_version: ${{ steps.get_docker_images_version.outputs.test_img_version }} + gorgone_docker_version: ${{ steps.get_docker_images_version.outputs.gorgone_docker_version }} + + steps: + - name: Checkout sources (current branch) + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + + # get latest major version to detect cloud / on-prem versions + - name: Checkout sources (develop branch) + uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0 + with: + ref: develop + path: centreon-develop + sparse-checkout: .version + + - name: Store latest major version + id: latest_major_version + run: | + . 
centreon-develop/.version + echo "latest_major_version=$MAJOR" >> $GITHUB_OUTPUT + shell: bash + + - if: ${{ github.event_name == 'pull_request' }} + name: Get nested pull request path + id: pr_path + uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 + with: + script: | + const prPath = ['${{ github.head_ref }}', '${{ github.base_ref }}']; + + const result = await github.rest.pulls.list({ + owner: context.repo.owner, + repo: context.repo.repo, + per_page: 100, + state: 'open' + }); + + let found = true; + while (found) { + found = false; + result.data.forEach(({ head: { ref: headRef }, base: { ref: baseRef} }) => { + if (headRef === prPath[prPath.length - 1] && ! prPath.includes(baseRef)) { + found = true; + prPath.push(baseRef); + } + }); + } + + return prPath; + + - name: Get stability + id: get_stability + uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 + with: + script: | + const getStability = (branchName) => { + switch (true) { + case /(^develop$)|(^dev-\d{2}\.\d{2}\.x$)|(^prepare-release-cloud.*)/.test(branchName): + return 'unstable'; + case /(^release.+)|(^hotfix.+)/.test(branchName): + return 'testing'; + case /(^master$)|(^\d{2}\.\d{2}\.x$)/.test(branchName): + return 'stable'; + default: + return 'canary'; + } + }; + + core.setOutput('stability', getStability('${{ github.head_ref || github.ref_name }}')); + + let isTargetingFeatureBranch = false; + if ("${{ github.event_name }}" === "pull_request") { + let targetStability = 'canary'; + const prPath = ${{ steps.pr_path.outputs.result || '[]' }}; + prPath.shift(); // remove current branch + + if (prPath.length && getStability(prPath[0]) === 'canary') { + isTargetingFeatureBranch = true; + } + + prPath.every((branchName) => { + console.log(`checking stability of ${branchName}`) + targetStability = getStability(branchName); + + if (targetStability !== 'canary') { + return false; + } + + return true; + }); + + core.setOutput('target_stability', targetStability); + } + + core.setOutput('is_targeting_feature_branch', isTargetingFeatureBranch); + + - name: Get version from ${{ inputs.version_file }} + id: get_version + run: | + if [[ "${{ inputs.version_file }}" == */.version ]]; then + . .version + . ${{ inputs.version_file }} + VERSION="$MAJOR.$MINOR" + elif [[ "${{ inputs.version_file }}" == CMakeLists.txt ]]; then + MAJOR=$(awk '$1 ~ "COLLECT_MAJOR" {maj=substr($2, 1, length($2)-1)} $1 ~ "COLLECT_MINOR" {min=substr($2, 1, length($2)-1) ; print maj "." min}' CMakeLists.txt) + MINOR=$(awk '$1 ~ "COLLECT_PATCH" {print substr($2, 1, length($2) - 1)}' CMakeLists.txt) + VERSION="$MAJOR.$MINOR" + else + echo "Unable to parse version file ${{ inputs.version_file }}" + exit 1 + fi + + if grep -E '^[2-9][0-9]\.[0-9][0-9]\.[0-9]+' <<<"$VERSION" >/dev/null 2>&1 ; then + n=${VERSION//[!0-9]/ } + a=(${n//\./ }) + echo "major_version=${a[0]}.${a[1]}" >> $GITHUB_OUTPUT + MAJOR=${a[0]}.${a[1]} + echo "minor_version=${a[2]}" >> $GITHUB_OUTPUT + else + echo "Cannot parse version number from ${{ inputs.version_file }}" + exit 1 + fi + shell: bash + + - name: "Get release: 1 for testing / stable, . 
for others" + id: get_release + run: | + if [[ "${{ steps.get_stability.outputs.stability }}" == "testing" || "${{ steps.get_stability.outputs.stability }}" == "stable" ]]; then + RELEASE="1" + else + RELEASE="$(date +%s).$(echo ${{ github.sha }} | cut -c -7)" + fi + + echo "release=$RELEASE" >> $GITHUB_OUTPUT + shell: bash + + - name: "Get release type: hotfix, release or not defined if not a release" + id: get_release_type + run: | + RELEASE_TYPE=$(echo "${{ github.head_ref || github.ref_name }}" | cut -d '-' -f 1) + if [[ "$RELEASE_TYPE" == "hotfix" || "$RELEASE_TYPE" == "release" ]]; then + echo "release_type=$RELEASE_TYPE" >> $GITHUB_OUTPUT + fi + shell: bash + + - name: "Detect cloud version" + id: detect_cloud_version + uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 + with: + script: | + if ("${{ github.event_name }}" === "pull_request") { + const prPath = ${{ steps.pr_path.outputs.result || '[]' }}; + const finalTargetBranch = prPath.pop(); + if (['develop', 'master'].includes(finalTargetBranch)) { + return true; + } else if (/\d{2}\.\d{2}\.x$/.test(finalTargetBranch)) { + return false; + } + } + + const developMajorVersion = "${{ steps.latest_major_version.outputs.latest_major_version }}"; + const currentMajorVersion = "${{ steps.get_version.outputs.major_version }}"; + + if (Number(currentMajorVersion) >= Number(developMajorVersion)) { + return true; + } + + return false; + + - name: Get docker images version + id: get_docker_images_version + run: | + IMG_VERSION=$( cat `ls .github/docker/Dockerfile.centreon-collect-* | grep -v test` vcpkg.json | md5sum | awk '{print substr($1, 0, 8)}') + echo "img_version=$IMG_VERSION" >> $GITHUB_OUTPUT + + TEST_IMG_VERSION=$(cat .github/docker/Dockerfile.centreon-collect-*-test .github/scripts/collect-prepare-test-robot.sh resources/*.sql | md5sum | cut -c1-8) + echo "test_img_version=$TEST_IMG_VERSION" >> $GITHUB_OUTPUT + + GORGONE_DOCKER_VERSION=$(cat .github/docker/Dockerfile.gorgone-testing-* | md5sum | cut -c1-8) + echo "gorgone_docker_version=$GORGONE_DOCKER_VERSION" >> $GITHUB_OUTPUT + + - name: Display info in job summary + uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 + env: + VERSION_FILE: ${{ inputs.version_file }} + with: + script: | + const outputTable = [ + [{data: 'Name', header: true}, {data: 'Value', header: true}], + ['latest_major_version', '${{ steps.latest_major_version.outputs.latest_major_version }}'], + ['is_cloud', '${{ steps.detect_cloud_version.outputs.result }}'], + ['major_version', '${{ steps.get_version.outputs.major_version }}'], + ['minor_version', '${{ steps.get_version.outputs.minor_version }}'], + ['release', '${{ steps.get_release.outputs.release }}'], + ['stability', '${{ steps.get_stability.outputs.stability }}'], + ['release_type', '${{ steps.get_release_type.outputs.release_type || 'not defined because this is not a release' }}'], + ['is_targeting_feature_branch', '${{ steps.get_stability.outputs.is_targeting_feature_branch }}'], + ['img_version', '${{ steps.get_docker_images_version.outputs.img_version }}'], + ['test_img_version', '${{ steps.get_docker_images_version.outputs.test_img_version }}'], + ['gorgone_docker_version', '${{ steps.get_docker_images_version.outputs.gorgone_docker_version }}'], + ]; + + outputTable.push(['target_stability', '${{ steps.get_stability.outputs.target_stability || 'not defined because current run is not triggered by pull request event' }}']); + + core.summary + .addHeading(`${context.workflow} environment 
outputs`) + .addTable(outputTable); + + if ("${{ github.event_name }}" === "pull_request") { + const prPath = ${{ steps.pr_path.outputs.result || '[]' }}; + const mainBranchName = prPath.pop(); + let codeBlock = ` + %%{ init: { 'gitGraph': { 'mainBranchName': '${mainBranchName}', 'showCommitLabel': false } } }%% + gitGraph + commit`; + prPath.reverse().forEach((branchName) => { + codeBlock = `${codeBlock} + branch ${branchName} + checkout ${branchName} + commit`; + }); + + core.summary + .addHeading('Git workflow') + .addCodeBlock( + codeBlock, + "mermaid" + ); + } + + core.summary.write(); diff --git a/.github/workflows/get-version.yml b/.github/workflows/get-version.yml deleted file mode 100644 index 01ce4667f6e..00000000000 --- a/.github/workflows/get-version.yml +++ /dev/null @@ -1,209 +0,0 @@ -on: - workflow_call: - inputs: - version_file: - required: false - type: string - default: CMakeLists.txt - outputs: - major_version: - description: "major version" - value: ${{ jobs.get-version.outputs.major_version }} - minor_version: - description: "minor version" - value: ${{ jobs.get-version.outputs.minor_version }} - img_version: - description: "docker image version (vcpkg checksum)" - value: ${{ jobs.get-version.outputs.img_version }} - test_img_version: - description: "test docker image version (checksum of database sql, script and dockerfiles)" - value: ${{ jobs.get-version.outputs.test_img_version }} - version: - description: "major version" - value: ${{ jobs.get-version.outputs.version }} - release: - description: "release" - value: ${{ jobs.get-version.outputs.release }} - stability: - description: "branch stability (stable, testing, unstable, canary)" - value: ${{ jobs.get-version.outputs.stability }} - environment: - description: "branch stability (stable, testing, unstable, canary)" - value: ${{ jobs.get-version.outputs.environment }} - release_type: - description: "type of release (hotfix, release)" - value: ${{ jobs.get-version.outputs.release_type }} - release_cloud: - description: "context of release (cloud or not cloud)" - value: ${{ jobs.get-version.outputs.release_cloud }} - -jobs: - get-version: - runs-on: ubuntu-24.04 - outputs: - major_version: ${{ steps.get_version.outputs.major_version }} - minor_version: ${{ steps.get_version.outputs.minor_version }} - img_version: ${{ steps.get_version.outputs.img_version }} - test_img_version: ${{ steps.get_version.outputs.test_img_version }} - version: ${{ steps.get_version.outputs.version }} - release: ${{ steps.get_version.outputs.release }} - stability: ${{ steps.get_version.outputs.stability }} - environment: ${{ steps.get_version.outputs.env }} - release_type: ${{ steps.get_version.outputs.release_type }} - release_cloud: ${{ steps.get_version.outputs.release_cloud}} - - steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - - - name: install gh cli on self-hosted runner - run: | - if ! command -v gh &> /dev/null; then - echo "Installing GH CLI." 
- type -p curl >/dev/null || (sudo apt update && sudo apt install curl -y) - curl -fsSL https://cli.github.com/packages/githubcli-archive-keyring.gpg | sudo dd of=/usr/share/keyrings/githubcli-archive-keyring.gpg - sudo chmod go+r /usr/share/keyrings/githubcli-archive-keyring.gpg - echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" | sudo tee /etc/apt/sources.list.d/github-cli.list > /dev/null - sudo apt update - sudo apt install gh -y - else - echo "GH CLI is already installed." - fi - shell: bash - - - id: get_version - run: | - set -x - - if [[ "${{ inputs.version_file }}" == */.version ]]; then - . .version - . ${{ inputs.version_file }} - VERSION="$MAJOR.$MINOR" - elif [[ "${{ inputs.version_file }}" == CMakeLists.txt ]]; then - MAJOR=$(awk '$1 ~ "COLLECT_MAJOR" {maj=substr($2, 1, length($2)-1)} $1 ~ "COLLECT_MINOR" {min=substr($2, 1, length($2)-1) ; print maj "." min}' CMakeLists.txt) - MINOR=$(awk '$1 ~ "COLLECT_PATCH" {print substr($2, 1, length($2) - 1)}' CMakeLists.txt) - VERSION="$MAJOR.$MINOR" - else - echo "Unable to parse ${{ inputs.version_file }}" - exit 1 - fi - - echo "VERSION=$VERSION" - - if egrep '^[2-9][0-9]\.[0-9][0-9]\.[0-9]+' <<<"$VERSION" >/dev/null 2>&1 ; then - n=${VERSION//[!0-9]/ } - a=(${n//\./ }) - echo "major_version=${a[0]}.${a[1]}" >> $GITHUB_OUTPUT - MAJOR=${a[0]}.${a[1]} - echo "minor_version=${a[2]}" >> $GITHUB_OUTPUT - else - echo "Cannot parse version number from ${{ inputs.version_file }}" - exit 1 - fi - - IMG_VERSION=$( cat `ls .github/docker/Dockerfile.centreon-collect-* | grep -v test` vcpkg.json | md5sum | awk '{print substr($1, 0, 8)}') - TEST_IMG_VERSION=$(cat .github/docker/Dockerfile.centreon-collect-*-test .github/scripts/collect-prepare-test-robot.sh resources/*.sql | md5sum | cut -c1-8) - echo "img_version=$IMG_VERSION" >> $GITHUB_OUTPUT - echo "test_img_version=$TEST_IMG_VERSION" >> $GITHUB_OUTPUT - echo "version=$VERSION" >> $GITHUB_OUTPUT - - if [[ -z "$GITHUB_HEAD_REF" ]]; then - BRANCHNAME="$GITHUB_REF_NAME" - else - BRANCHNAME="$GITHUB_HEAD_REF" - fi - - echo "BRANCHNAME is: $BRANCHNAME" - - # Set default release values - GITHUB_RELEASE_CLOUD=0 - GITHUB_RELEASE_TYPE=$(echo $BRANCHNAME |cut -d '-' -f 1) - - case "$BRANCHNAME" in - master) - echo "release=1" >> $GITHUB_OUTPUT - echo "release_cloud=1" >> $GITHUB_OUTPUT - echo "release_type=$GITHUB_RELEASE_TYPE" >> $GITHUB_OUTPUT - ;; - [2-9][0-9].[0-9][0-9].x) - echo "release=1" >> $GITHUB_OUTPUT - echo "release_cloud=$GITHUB_RELEASE_CLOUD" >> $GITHUB_OUTPUT - echo "release_type=$GITHUB_RELEASE_TYPE" >> $GITHUB_OUTPUT - ;; - develop) - echo "release=`date +%s`.`echo ${{ github.sha }} | cut -c -7`" >> $GITHUB_OUTPUT - echo "release_cloud=1" >> $GITHUB_OUTPUT - echo "release_type=$GITHUB_RELEASE_TYPE" >> $GITHUB_OUTPUT - ;; - dev-[2-9][0-9].[0-9][0-9].x) - echo "release=`date +%s`.`echo ${{ github.sha }} | cut -c -7`" >> $GITHUB_OUTPUT - echo "release_cloud=0" >> $GITHUB_OUTPUT - echo "release_type=$GITHUB_RELEASE_TYPE" >> $GITHUB_OUTPUT - ;; - release* | hotfix*) - # Handle workflow_dispatch run triggers and run a dispatch ONLY for cloud release - GITHUB_RELEASE_BRANCH_BASE_REF_NAME="$(gh pr view $BRANCHNAME -q .baseRefName --json headRefName,baseRefName,state)" - echo "GITHUB_RELEASE_BRANCH_BASE_REF_NAME is: $GITHUB_RELEASE_BRANCH_BASE_REF_NAME" - GITHUB_RELEASE_BRANCH_PR_STATE="$(gh pr view $BRANCHNAME -q .state --json headRefName,baseRefName,state)" - echo 
"GITHUB_RELEASE_BRANCH_PR_STATE is: $GITHUB_RELEASE_BRANCH_PR_STATE" - - # Check if the release context (cloud and hotfix or cloud and release) - if [[ "$GITHUB_RELEASE_BRANCH_BASE_REF_NAME" == "master" ]] && [[ "$GITHUB_RELEASE_BRANCH_PR_STATE" == "OPEN" ]]; then - # Get release pull request ID - GITHUB_RELEASE_BRANCH_PR_NUMBER="$(gh pr view $BRANCHNAME -q .[] --json number)" - # Set release cloud to 1 (0=not-cloud, 1=cloud) - GITHUB_RELEASE_CLOUD=1 - # Debug - echo "GITHUB_RELEASE_TYPE is: $GITHUB_RELEASE_TYPE" - echo "GITHUB_RELEASE_BRANCH_PR_NUMBER is: $GITHUB_RELEASE_BRANCH_PR_NUMBER" # We do leave this here as debug help. - echo "GITHUB_RELEASE_CLOUD is: $GITHUB_RELEASE_CLOUD" - # Github ouputs - echo "release=`date +%s`.`echo ${{ github.sha }} | cut -c -7`" >> $GITHUB_OUTPUT - echo "release_type=$GITHUB_RELEASE_TYPE" >> $GITHUB_OUTPUT - echo "release_cloud=$GITHUB_RELEASE_CLOUD" >> $GITHUB_OUTPUT - else - echo "release=1" >> $GITHUB_OUTPUT - echo "release_cloud=$GITHUB_RELEASE_CLOUD" >> $GITHUB_OUTPUT - echo "release_type=$GITHUB_RELEASE_TYPE" >> $GITHUB_OUTPUT - fi - ;; - prepare-release-cloud*) - # Set release cloud to 1 (0=not-cloud, 1=cloud) - GITHUB_RELEASE_CLOUD=1 - # Debug - echo "GITHUB_RELEASE_TYPE is: $GITHUB_RELEASE_TYPE" - echo "GITHUB_RELEASE_CLOUD is: $GITHUB_RELEASE_CLOUD" - # Github ouputs - echo "release=`date +%s`.`echo ${{ github.sha }} | cut -c -7`" >> $GITHUB_OUTPUT - echo "release_type=$GITHUB_RELEASE_TYPE" >> $GITHUB_OUTPUT - echo "release_cloud=$GITHUB_RELEASE_CLOUD" >> $GITHUB_OUTPUT - ;; - *) - echo "release=`date +%s`.`echo ${{ github.sha }} | cut -c -7`" >> $GITHUB_OUTPUT - echo "release_cloud=$GITHUB_RELEASE_CLOUD" >> $GITHUB_OUTPUT - echo "release_type=$GITHUB_RELEASE_TYPE" >> $GITHUB_OUTPUT - ;; - esac - - case "$BRANCHNAME" in - develop | dev-[2-9][0-9].[0-9][0-9].x) - STABILITY="unstable" - ENV="development" - ;; - release* | hotfix*) - STABILITY="testing" - ENV="testing" - ;; - master | [2-9][0-9].[0-9][0-9].x) - STABILITY="stable" - ENV="production" - ;; - *) - STABILITY="canary" - ;; - esac - echo "stability=$STABILITY" >> $GITHUB_OUTPUT - echo "env=$VERSION-$ENV" >> $GITHUB_OUTPUT - echo "GH_ENV: $VERSION-$ENV" - shell: bash - env: - GH_TOKEN: ${{ github.token }} diff --git a/.github/workflows/gorgone.yml b/.github/workflows/gorgone.yml index 6438b28e281..611b7f53491 100644 --- a/.github/workflows/gorgone.yml +++ b/.github/workflows/gorgone.yml @@ -33,30 +33,31 @@ env: base_directory: gorgone jobs: - get-version: - uses: ./.github/workflows/get-version.yml + get-environment: + uses: ./.github/workflows/get-environment.yml with: version_file: gorgone/.version veracode-analysis: - needs: [get-version] + needs: [get-environment] + if: ${{ needs.get-environment.outputs.is_targeting_feature_branch != 'true' && github.event.pull_request.draft != 'true' }} uses: ./.github/workflows/veracode-analysis.yml with: module_directory: gorgone module_name: centreon-gorgone - major_version: ${{ needs.get-version.outputs.major_version }} - minor_version: ${{ needs.get-version.outputs.minor_version }} - img_version: ${{ needs.get-version.outputs.img_version }} + major_version: ${{ needs.get-environment.outputs.major_version }} + minor_version: ${{ needs.get-environment.outputs.minor_version }} + img_version: ${{ needs.get-environment.outputs.img_version }} secrets: veracode_api_id: ${{ secrets.VERACODE_API_ID_GORG }} veracode_api_key: ${{ secrets.VERACODE_API_KEY_GORG }} veracode_srcclr_token: ${{ secrets.VERACODE_SRCCLR_TOKEN }} - docker_registry_id: ${{ 
secrets.DOCKER_REGISTRY_ID }} - docker_registry_passwd: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + docker_registry_id: ${{ secrets.HARBOR_CENTREON_PULL_USERNAME }} + docker_registry_passwd: ${{ secrets.HARBOR_CENTREON_PULL_TOKEN }} package: - needs: [get-version] - if: ${{ needs.get-version.outputs.stability != 'stable' }} + needs: [get-environment] + if: ${{ needs.get-environment.outputs.stability != 'stable' }} strategy: fail-fast: false @@ -79,13 +80,13 @@ jobs: image: packaging-nfpm-jammy distrib: jammy - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 container: - image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ needs.get-version.outputs.major_version }} + image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ needs.get-environment.outputs.major_version }} credentials: - username: ${{ secrets.DOCKER_REGISTRY_ID }} - password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + username: ${{ secrets.HARBOR_CENTREON_PULL_USERNAME }} + password: ${{ secrets.HARBOR_CENTREON_PULL_TOKEN }} name: package ${{ matrix.distrib }} @@ -107,7 +108,7 @@ jobs: if: ${{ matrix.package_extension == 'rpm' }} run: | cd gorgone/selinux - sed -i "s/@VERSION@/${{ needs.get-version.outputs.major_version }}.${{ needs.get-version.outputs.minor_version }}/g" centreon-gorgoned.te + sed -i "s/@VERSION@/${{ needs.get-environment.outputs.major_version }}.${{ needs.get-environment.outputs.minor_version }}/g" centreon-gorgoned.te make -f /usr/share/selinux/devel/Makefile shell: bash @@ -122,21 +123,21 @@ jobs: nfpm_file_pattern: "gorgone/packaging/*.yaml" distrib: ${{ matrix.distrib }} package_extension: ${{ matrix.package_extension }} - major_version: ${{ needs.get-version.outputs.major_version }} - minor_version: ${{ needs.get-version.outputs.minor_version }} - release: ${{ needs.get-version.outputs.release }} + major_version: ${{ needs.get-environment.outputs.major_version }} + minor_version: ${{ needs.get-environment.outputs.minor_version }} + release: ${{ needs.get-environment.outputs.release }} arch: all commit_hash: ${{ github.sha }} cache_key: ${{ github.sha }}-${{ github.run_id }}-${{ matrix.package_extension }}-${{ matrix.distrib }} rpm_gpg_key: ${{ secrets.RPM_GPG_SIGNING_KEY }} rpm_gpg_signing_key_id: ${{ secrets.RPM_GPG_SIGNING_KEY_ID }} rpm_gpg_signing_passphrase: ${{ secrets.RPM_GPG_SIGNING_PASSPHRASE }} - stability: ${{ needs.get-version.outputs.stability }} + stability: ${{ needs.get-environment.outputs.stability }} deliver-sources: runs-on: [self-hosted, common] - needs: [get-version, package] - if: ${{ contains(fromJson('["stable"]'), needs.get-version.outputs.stability) && github.event_name != 'workflow_dispatch' }} + needs: [get-environment, package] + if: ${{ contains(fromJson('["stable"]'), needs.get-environment.outputs.stability) && github.event_name != 'workflow_dispatch' }} steps: - name: Checkout sources @@ -148,14 +149,14 @@ jobs: bucket_directory: centreon-gorgone module_directory: gorgone module_name: centreon-gorgone - major_version: ${{ needs.get-version.outputs.major_version }} - minor_version: ${{ needs.get-version.outputs.minor_version }} + major_version: ${{ needs.get-environment.outputs.major_version }} + minor_version: ${{ needs.get-environment.outputs.minor_version }} token_download_centreon_com: ${{ secrets.TOKEN_DOWNLOAD_CENTREON_COM }} deliver-rpm: runs-on: [self-hosted, common] - needs: [get-version, package] - if: ${{ contains(fromJson('["testing", "unstable"]'), needs.get-version.outputs.stability) }} + needs: [get-environment, package] + if: ${{ 
contains(fromJson('["testing", "unstable"]'), needs.get-environment.outputs.stability) }} strategy: matrix: @@ -170,17 +171,17 @@ jobs: with: module_name: gorgone distrib: ${{ matrix.distrib }} - version: ${{ needs.get-version.outputs.major_version }} + version: ${{ needs.get-environment.outputs.major_version }} artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} cache_key: ${{ github.sha }}-${{ github.run_id }}-rpm-${{ matrix.distrib }} - stability: ${{ needs.get-version.outputs.stability }} - release_type: ${{ needs.get-version.outputs.release_type }} - release_cloud: ${{ needs.get-version.outputs.release_cloud }} + stability: ${{ needs.get-environment.outputs.stability }} + release_type: ${{ needs.get-environment.outputs.release_type }} + is_cloud: ${{ needs.get-environment.outputs.is_cloud }} deliver-deb: runs-on: [self-hosted, common] - needs: [get-version, package] - if: ${{ contains(fromJson('["testing", "unstable"]'), needs.get-version.outputs.stability) }} + needs: [get-environment, package] + if: ${{ contains(fromJson('["testing", "unstable"]'), needs.get-environment.outputs.stability) }} strategy: matrix: @@ -195,16 +196,20 @@ jobs: with: module_name: gorgone distrib: ${{ matrix.distrib }} - version: ${{ needs.get-version.outputs.major_version }} + version: ${{ needs.get-environment.outputs.major_version }} artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} cache_key: ${{ github.sha }}-${{ github.run_id }}-deb-${{ matrix.distrib }} - stability: ${{ needs.get-version.outputs.stability }} - release_type: ${{ needs.get-version.outputs.release_type }} - release_cloud: ${{ needs.get-version.outputs.release_cloud }} + stability: ${{ needs.get-environment.outputs.stability }} + release_type: ${{ needs.get-environment.outputs.release_type }} + is_cloud: ${{ needs.get-environment.outputs.is_cloud }} promote: - needs: [get-version] - if: ${{ contains(fromJson('["stable"]'), needs.get-version.outputs.stability) && github.event_name != 'workflow_dispatch' }} + needs: [get-environment, deliver-rpm, deliver-deb] + if: | + (contains(fromJson('["stable", "testing"]'), needs.get-environment.outputs.stability) && github.event_name != 'workflow_dispatch') && + ! cancelled() && + ! contains(needs.*.result, 'failure') && + ! 
contains(needs.*.result, 'cancelled') runs-on: [self-hosted, common] strategy: matrix: @@ -220,8 +225,8 @@ jobs: artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} module_name: gorgone distrib: ${{ matrix.distrib }} - major_version: ${{ needs.get-version.outputs.major_version }} - stability: ${{ needs.get-version.outputs.stability }} + major_version: ${{ needs.get-environment.outputs.major_version }} + stability: ${{ needs.get-environment.outputs.stability }} github_ref_name: ${{ github.ref_name }} - release_type: ${{ needs.get-version.outputs.release_type }} - release_cloud: ${{ needs.get-version.outputs.release_cloud }} + release_type: ${{ needs.get-environment.outputs.release_type }} + is_cloud: ${{ needs.get-environment.outputs.is_cloud }} diff --git a/.github/workflows/libzmq.yml b/.github/workflows/libzmq.yml index ad0adeb625a..89f6f764ae7 100644 --- a/.github/workflows/libzmq.yml +++ b/.github/workflows/libzmq.yml @@ -19,11 +19,11 @@ on: - '.github/workflows/libzmq.yml' jobs: - get-version: - uses: ./.github/workflows/get-version.yml + get-environment: + uses: ./.github/workflows/get-environment.yml package-rpm: - needs: [get-version] + needs: [get-environment] strategy: fail-fast: false @@ -36,13 +36,13 @@ jobs: distrib: el9 arch: amd64 - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 container: - image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ needs.get-version.outputs.major_version }} + image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ needs.get-environment.outputs.major_version }} credentials: - username: ${{ secrets.DOCKER_REGISTRY_ID }} - password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + username: ${{ secrets.HARBOR_CENTREON_PULL_USERNAME }} + password: ${{ secrets.HARBOR_CENTREON_PULL_TOKEN }} name: package ${{ matrix.distrib }} @@ -73,7 +73,7 @@ jobs: key: ${{ github.run_id }}-${{ github.sha }}-rpm-libzmq-${{ matrix.distrib }}-${{ matrix.arch }} package-deb: - needs: [get-version] + needs: [get-environment] strategy: fail-fast: false @@ -85,11 +85,11 @@ jobs: arch: amd64 - image: packaging-nfpm-bookworm distrib: bookworm - runner: ubuntu-22.04 + runner: ubuntu-24.04 arch: amd64 - image: packaging-nfpm-jammy distrib: jammy - runner: ubuntu-22.04 + runner: ubuntu-24.04 arch: amd64 - image: packaging-bullseye-arm64 distrib: bullseye @@ -99,14 +99,23 @@ jobs: runs-on: ${{ matrix.runner }} container: - image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ needs.get-version.outputs.major_version }} + image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ needs.get-environment.outputs.major_version }} credentials: - username: ${{ secrets.DOCKER_REGISTRY_ID }} - password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + username: ${{ secrets.HARBOR_CENTREON_PULL_USERNAME }} + password: ${{ secrets.HARBOR_CENTREON_PULL_TOKEN }} name: package ${{ matrix.distrib }} ${{ matrix.arch }} steps: + - name: Checkout sources + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + + - name: Parse distrib name + id: parse-distrib + uses: ./.github/actions/parse-distrib + with: + distrib: ${{ matrix.distrib }} + - name: package deb run: | apt-get update @@ -122,7 +131,7 @@ jobs: wget -O - https://github.com/zeromq/libzmq/archive/refs/tags/v4.3.5.tar.gz | tar zxvf - cd libzmq-4.3.5 ln -s packaging/debian - sed -Ei 's/([0-9]+.[0-9]+.[0-9]+-[0-9]+.[0-9]+)/\1~${{ matrix.distrib }}/' debian/changelog + sed -Ei 's/([0-9]+.[0-9]+.[0-9]+-[0-9]+.[0-9]+)/\1${{ 
steps.parse-distrib.outputs.package_distrib_separator }}${{ steps.parse-distrib.outputs.package_distrib_name }}/' debian/changelog sed -Ei 's/UNRELEASED/${{ matrix.distrib }}/' debian/changelog dpkg-buildpackage -us -uc -nc cd .. @@ -137,9 +146,8 @@ jobs: key: ${{ github.run_id }}-${{ github.sha }}-deb-libzmq-${{ matrix.distrib }}-${{ matrix.arch }} deliver-rpm: - if: ${{ contains(fromJson('["testing", "unstable"]'), needs.get-version.outputs.stability) }} - needs: [get-version, package-rpm] - environment: ${{ needs.get-version.outputs.environment }} + if: ${{ contains(fromJson('["testing", "unstable"]'), needs.get-environment.outputs.stability) }} + needs: [get-environment, package-rpm] runs-on: [self-hosted, common] strategy: matrix: @@ -160,17 +168,16 @@ jobs: with: module_name: libzmq distrib: ${{ matrix.distrib }} - major_version: ${{ needs.get-version.outputs.major_version }} + major_version: ${{ needs.get-environment.outputs.major_version }} artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} cache_key: ${{ github.run_id }}-${{ github.sha }}-rpm-libzmq-${{ matrix.distrib }}-${{ matrix.arch }} - stability: ${{ needs.get-version.outputs.stability }} - release_type: ${{ needs.get-version.outputs.release_type }} - release_cloud: ${{ needs.get-version.outputs.release_cloud }} + stability: ${{ needs.get-environment.outputs.stability }} + release_type: ${{ needs.get-environment.outputs.release_type }} + is_cloud: ${{ needs.get-environment.outputs.is_cloud }} deliver-deb: - if: ${{ contains(fromJson('["testing", "unstable"]'), needs.get-version.outputs.stability) }} - needs: [get-version, package-deb] - environment: ${{ needs.get-version.outputs.environment }} + if: ${{ contains(fromJson('["testing", "unstable"]'), needs.get-environment.outputs.stability) }} + needs: [get-environment, package-deb] runs-on: [self-hosted, common] strategy: matrix: @@ -195,16 +202,20 @@ jobs: with: module_name: libzmq distrib: ${{ matrix.distrib }} - major_version: ${{ needs.get-version.outputs.major_version }} + major_version: ${{ needs.get-environment.outputs.major_version }} artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} cache_key: ${{ github.run_id }}-${{ github.sha }}-deb-libzmq-${{ matrix.distrib }}-${{ matrix.arch }} - stability: ${{ needs.get-version.outputs.stability }} - release_type: ${{ needs.get-version.outputs.release_type }} - release_cloud: ${{ needs.get-version.outputs.release_cloud }} + stability: ${{ needs.get-environment.outputs.stability }} + release_type: ${{ needs.get-environment.outputs.release_type }} + is_cloud: ${{ needs.get-environment.outputs.is_cloud }} promote: - needs: [get-version] - if: ${{ contains(fromJson('["stable"]'), needs.get-version.outputs.stability) && github.event_name != 'workflow_dispatch' }} + needs: [get-environment, deliver-rpm, deliver-deb] + if: | + (contains(fromJson('["stable", "testing"]'), needs.get-environment.outputs.stability) && github.event_name != 'workflow_dispatch') && + ! cancelled() && + ! contains(needs.*.result, 'failure') && + ! 
contains(needs.*.result, 'cancelled') runs-on: [self-hosted, common] strategy: matrix: @@ -220,8 +231,8 @@ jobs: artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} module_name: libzmq distrib: ${{ matrix.distrib }} - major_version: ${{ needs.get-version.outputs.major_version }} - stability: ${{ needs.get-version.outputs.stability }} + major_version: ${{ needs.get-environment.outputs.major_version }} + stability: ${{ needs.get-environment.outputs.stability }} github_ref_name: ${{ github.ref_name }} - release_type: ${{ needs.get-version.outputs.release_type }} - release_cloud: ${{ needs.get-version.outputs.release_cloud }} + release_type: ${{ needs.get-environment.outputs.release_type }} + is_cloud: ${{ needs.get-environment.outputs.is_cloud }} diff --git a/.github/workflows/lua-curl.yml b/.github/workflows/lua-curl.yml index 96815e14c36..6f813e07880 100644 --- a/.github/workflows/lua-curl.yml +++ b/.github/workflows/lua-curl.yml @@ -24,12 +24,12 @@ env: release: 20 # 10 for openssl 1.1.1 / 20 for openssl system jobs: - get-version: - uses: ./.github/workflows/get-version.yml + get-environment: + uses: ./.github/workflows/get-environment.yml package: - needs: [get-version] - if: ${{ needs.get-version.outputs.stability != 'stable' }} + needs: [get-environment] + if: ${{ needs.get-environment.outputs.stability != 'stable' }} strategy: fail-fast: false @@ -81,10 +81,10 @@ jobs: runs-on: ${{ matrix.runner }} container: - image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ needs.get-version.outputs.img_version }} + image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ needs.get-environment.outputs.img_version }} credentials: - username: ${{ secrets.DOCKER_REGISTRY_ID }} - password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + username: ${{ secrets.HARBOR_CENTREON_PULL_USERNAME }} + password: ${{ secrets.HARBOR_CENTREON_PULL_TOKEN }} name: package ${{ matrix.distrib }} ${{ matrix.arch }} @@ -138,12 +138,12 @@ jobs: rpm_gpg_key: ${{ secrets.RPM_GPG_SIGNING_KEY }} rpm_gpg_signing_key_id: ${{ secrets.RPM_GPG_SIGNING_KEY_ID }} rpm_gpg_signing_passphrase: ${{ secrets.RPM_GPG_SIGNING_PASSPHRASE }} - stability: ${{ needs.get-version.outputs.stability }} + stability: ${{ needs.get-environment.outputs.stability }} deliver-rpm: - if: ${{ contains(fromJson('["testing", "unstable"]'), needs.get-version.outputs.stability) }} - needs: [get-version, package] - runs-on: ubuntu-22.04 + if: ${{ contains(fromJson('["testing", "unstable"]'), needs.get-environment.outputs.stability) }} + needs: [get-environment, package] + runs-on: ubuntu-24.04 strategy: matrix: include: @@ -162,17 +162,17 @@ jobs: with: module_name: lua-curl distrib: ${{ matrix.distrib }} - version: ${{ needs.get-version.outputs.major_version }} + version: ${{ needs.get-environment.outputs.major_version }} artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} cache_key: ${{ github.sha }}-${{ github.run_id }}-rpm-lua-curl-${{ matrix.distrib }}-${{ matrix.arch }} - stability: ${{ needs.get-version.outputs.stability }} - release_type: ${{ needs.get-version.outputs.release_type }} - release_cloud: ${{ needs.get-version.outputs.release_cloud }} + stability: ${{ needs.get-environment.outputs.stability }} + release_type: ${{ needs.get-environment.outputs.release_type }} + is_cloud: ${{ needs.get-environment.outputs.is_cloud }} deliver-deb: - if: ${{ contains(fromJson('["testing", "unstable"]'), needs.get-version.outputs.stability) }} - needs: [get-version, package] - runs-on: ubuntu-22.04 + if: ${{ 
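
The promote gating rewritten in the gorgone and libzmq workflows above (and repeated for lua-curl below) deserves unpacking: promotion now also covers testing builds, still skips manual workflow_dispatch runs, and refuses to run when any deliver job it depends on failed or was cancelled. A hedged Python model of that if: expression (function and parameter names are illustrative):

# Hypothetical model of the promote job's `if:` condition.
def should_promote(stability, event_name, need_results):
    """need_results: results of the deliver-* jobs promote depends on."""
    return (
        stability in ("stable", "testing")
        and event_name != "workflow_dispatch"
        and "failure" not in need_results
        and "cancelled" not in need_results
    )

assert should_promote("testing", "push", ["success", "success"])
assert not should_promote("testing", "workflow_dispatch", ["success", "success"])
assert not should_promote("stable", "push", ["success", "cancelled"])
assert not should_promote("unstable", "push", ["success", "success"])
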
contains(fromJson('["testing", "unstable"]'), needs.get-environment.outputs.stability) }} + needs: [get-environment, package] + runs-on: ubuntu-24.04 strategy: matrix: include: @@ -195,16 +195,20 @@ jobs: with: module_name: lua-curl distrib: ${{ matrix.distrib }} - version: ${{ needs.get-version.outputs.major_version }} + version: ${{ needs.get-environment.outputs.major_version }} artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} cache_key: ${{ github.sha }}-${{ github.run_id }}-deb-lua-curl-${{ matrix.distrib }}-${{ matrix.arch }} - stability: ${{ needs.get-version.outputs.stability }} - release_type: ${{ needs.get-version.outputs.release_type }} - release_cloud: ${{ needs.get-version.outputs.release_cloud }} + stability: ${{ needs.get-environment.outputs.stability }} + release_type: ${{ needs.get-environment.outputs.release_type }} + is_cloud: ${{ needs.get-environment.outputs.is_cloud }} promote: - needs: [get-version] - if: ${{ contains(fromJson('["stable"]'), needs.get-version.outputs.stability) && github.event_name != 'workflow_dispatch' }} + needs: [get-environment, deliver-rpm, deliver-deb] + if: | + (contains(fromJson('["stable", "testing"]'), needs.get-environment.outputs.stability) && github.event_name != 'workflow_dispatch') && + ! cancelled() && + ! contains(needs.*.result, 'failure') && + ! contains(needs.*.result, 'cancelled') runs-on: [self-hosted, common] strategy: matrix: @@ -220,8 +224,8 @@ jobs: artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} module_name: lua-curl distrib: ${{ matrix.distrib }} - major_version: ${{ needs.get-version.outputs.major_version }} - stability: ${{ needs.get-version.outputs.stability }} + major_version: ${{ needs.get-environment.outputs.major_version }} + stability: ${{ needs.get-environment.outputs.stability }} github_ref_name: ${{ github.ref_name }} - release_type: ${{ needs.get-version.outputs.release_type }} - release_cloud: ${{ needs.get-version.outputs.release_cloud }} + release_type: ${{ needs.get-environment.outputs.release_type }} + is_cloud: ${{ needs.get-environment.outputs.is_cloud }} diff --git a/.github/workflows/package-collect.yml b/.github/workflows/package-collect.yml index bb41d9d71fc..35e2873b199 100644 --- a/.github/workflows/package-collect.yml +++ b/.github/workflows/package-collect.yml @@ -76,8 +76,8 @@ jobs: container: image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ inputs.img_version }} credentials: - username: ${{ secrets.DOCKER_REGISTRY_ID }} - password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + username: ${{ secrets.HARBOR_CENTREON_PULL_USERNAME }} + password: ${{ secrets.HARBOR_CENTREON_PULL_TOKEN }} name: package ${{ matrix.distrib }} ${{ matrix.arch }} diff --git a/.github/workflows/rebase-master.yml b/.github/workflows/rebase-master.yml index c2241297a0e..e9336085c6c 100644 --- a/.github/workflows/rebase-master.yml +++ b/.github/workflows/rebase-master.yml @@ -12,7 +12,7 @@ on: jobs: main: name: Sync Stable Branches - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 if: github.event.pull_request.merged == true steps: - name: git checkout diff --git a/.github/workflows/rebase-version.yml b/.github/workflows/rebase-version.yml index 4be9a45361f..8c3f36d26cc 100644 --- a/.github/workflows/rebase-version.yml +++ b/.github/workflows/rebase-version.yml @@ -12,7 +12,7 @@ on: jobs: main: name: Sync Stable Branches - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 if: github.event.pull_request.merged == true steps: - name: git checkout diff --git a/.github/workflows/release.yml 
b/.github/workflows/release.yml index e20fa63d9d6..aa40dc0d7e4 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -21,7 +21,7 @@ on: jobs: release: if: ${{ github.event.pull_request.merged == true }} - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: Check base_ref run: | diff --git a/.github/workflows/robot-nightly.yml b/.github/workflows/robot-nightly.yml index dd10ad0242c..1e6c18717b1 100644 --- a/.github/workflows/robot-nightly.yml +++ b/.github/workflows/robot-nightly.yml @@ -27,40 +27,40 @@ jobs: env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - get-version: - uses: ./.github/workflows/get-version.yml + get-environment: + uses: ./.github/workflows/get-environment.yml with: version_file: CMakeLists.txt veracode-analysis: - needs: [get-version] + needs: [get-environment] uses: ./.github/workflows/veracode-analysis.yml with: module_name: centreon-collect - major_version: ${{ needs.get-version.outputs.major_version }} - minor_version: ${{ needs.get-version.outputs.minor_version }} - img_version: ${{ needs.get-version.outputs.img_version }} + major_version: ${{ needs.get-environment.outputs.major_version }} + minor_version: ${{ needs.get-environment.outputs.minor_version }} + img_version: ${{ needs.get-environment.outputs.img_version }} secrets: veracode_api_id: ${{ secrets.VERACODE_API_ID_COLL }} veracode_api_key: ${{ secrets.VERACODE_API_KEY_COLL }} veracode_srcclr_token: ${{ secrets.VERACODE_SRCCLR_TOKEN }} - docker_registry_id: ${{ secrets.DOCKER_REGISTRY_ID }} - docker_registry_passwd: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + docker_registry_id: ${{ secrets.HARBOR_CENTREON_PULL_USERNAME }} + docker_registry_passwd: ${{ secrets.HARBOR_CENTREON_PULL_TOKEN }} package: - needs: [get-version] + needs: [get-environment] uses: ./.github/workflows/package-collect.yml with: - stability: ${{ needs.get-version.outputs.stability }} - major_version: ${{ needs.get-version.outputs.major_version }} - minor_version: ${{ needs.get-version.outputs.minor_version }} - img_version: ${{ needs.get-version.outputs.img_version }} - release: ${{ needs.get-version.outputs.release }} + stability: ${{ needs.get-environment.outputs.stability }} + major_version: ${{ needs.get-environment.outputs.major_version }} + minor_version: ${{ needs.get-environment.outputs.minor_version }} + img_version: ${{ needs.get-environment.outputs.img_version }} + release: ${{ needs.get-environment.outputs.release }} commit_hash: ${{ github.sha }} secrets: inherit robot-test: - needs: [get-version, package] + needs: [get-environment, package] strategy: fail-fast: false @@ -116,24 +116,24 @@ jobs: distrib: ${{ matrix.distrib }} arch: ${{ matrix.arch }} image: ${{ matrix.image }} - image_test: ${{ matrix.image }}:${{ needs.get-version.outputs.test_img_version }} - image_version: ${{ needs.get-version.outputs.img_version }} + image_test: ${{ matrix.image }}:${{ needs.get-environment.outputs.test_img_version }} + image_version: ${{ needs.get-environment.outputs.img_version }} package_cache_key: ${{ github.run_id }}-${{ github.sha }}-${{ matrix.package_extension }}-centreon-collect-${{ matrix.distrib }}-${{ matrix.arch }}-${{ github.head_ref || github.ref_name }} package_cache_path: ./*.${{ matrix.package_extension}} database_type: ${{ matrix.database_type }} tests_params: ${{matrix.tests_params}} test_group_name: ${{matrix.test_group_name}} secrets: - registry_username: ${{ secrets.DOCKER_REGISTRY_ID }} - registry_password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + registry_username: ${{ 
secrets.HARBOR_CENTREON_PULL_USERNAME }} + registry_password: ${{ secrets.HARBOR_CENTREON_PULL_TOKEN }} collect_s3_access_key: ${{ secrets.COLLECT_S3_ACCESS_KEY }} collect_s3_secret_key: ${{ secrets.COLLECT_S3_SECRET_KEY }} xray_client_id: ${{ secrets.XRAY_CLIENT_ID }} xray_client_secret: ${{ secrets.XRAY_CLIENT_SECRET }} deliver-rpm: - if: ${{ contains(fromJson('["unstable"]'), needs.get-version.outputs.stability) }} - needs: [robot-test, get-version] + if: ${{ contains(fromJson('["unstable"]'), needs.get-environment.outputs.stability) }} + needs: [robot-test, get-environment] runs-on: [self-hosted, common] strategy: matrix: @@ -149,16 +149,16 @@ jobs: with: module_name: collect distrib: ${{ matrix.distrib }} - major_version: ${{ needs.get-version.outputs.major_version }} + major_version: ${{ needs.get-environment.outputs.major_version }} artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} cache_key: ${{ github.run_id }}-${{ github.sha }}-rpm-centreon-collect-${{ matrix.distrib }}-amd64-${{ github.head_ref || github.ref_name }} - stability: ${{ needs.get-version.outputs.stability }} - release_type: ${{ needs.get-version.outputs.release_type }} - release_cloud: ${{ needs.get-version.outputs.release_cloud }} + stability: ${{ needs.get-environment.outputs.stability }} + release_type: ${{ needs.get-environment.outputs.release_type }} + is_cloud: ${{ needs.get-environment.outputs.is_cloud }} deliver-deb: - if: ${{ contains(fromJson('["unstable"]'), needs.get-version.outputs.stability) }} - needs: [robot-test, get-version] + if: ${{ contains(fromJson('["unstable"]'), needs.get-environment.outputs.stability) }} + needs: [robot-test, get-environment] runs-on: [self-hosted, common] strategy: matrix: @@ -182,9 +182,9 @@ jobs: with: module_name: collect distrib: ${{ matrix.distrib }} - major_version: ${{ needs.get-version.outputs.major_version }} + major_version: ${{ needs.get-environment.outputs.major_version }} artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} cache_key: ${{ github.run_id }}-${{ github.sha }}-deb-centreon-collect-${{ matrix.distrib }}-${{ matrix.arch }}-${{ github.head_ref || github.ref_name }} - stability: ${{ needs.get-version.outputs.stability }} - release_type: ${{ needs.get-version.outputs.release_type }} - release_cloud: ${{ needs.get-version.outputs.release_cloud }} + stability: ${{ needs.get-environment.outputs.stability }} + release_type: ${{ needs.get-environment.outputs.release_type }} + is_cloud: ${{ needs.get-environment.outputs.is_cloud }} diff --git a/.github/workflows/robot-test.yml b/.github/workflows/robot-test.yml index d0a0398efb5..eca3f40ac90 100644 --- a/.github/workflows/robot-test.yml +++ b/.github/workflows/robot-test.yml @@ -47,7 +47,7 @@ on: jobs: test-image-to-cache: - runs-on: ${{ contains(inputs.image, 'arm') && fromJson('["self-hosted", "collect-arm64"]') || 'ubuntu-22.04' }} + runs-on: ${{ contains(inputs.image, 'arm') && fromJson('["self-hosted", "collect-arm64"]') || 'ubuntu-24.04' }} steps: - name: Checkout sources uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 @@ -84,7 +84,7 @@ jobs: robot-test-list: needs: [test-image-to-cache] - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 outputs: features: ${{ steps.list-features.outputs.features }} @@ -100,7 +100,7 @@ jobs: robot-test: needs: [robot-test-list] - runs-on: ${{ contains(inputs.image, 'arm') && fromJson('["self-hosted", "collect-arm64"]') || 'ubuntu-22.04' }} + runs-on: ${{ contains(inputs.image, 'arm') && fromJson('["self-hosted", "collect-arm64"]') 
|| 'ubuntu-24.04' }} strategy: fail-fast: false @@ -192,7 +192,7 @@ jobs: robot-test-report: needs: [robot-test] if: ${{ failure() }} - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 diff --git a/.github/workflows/veracode-analysis.yml b/.github/workflows/veracode-analysis.yml index 23361521e81..99c81a3ca20 100644 --- a/.github/workflows/veracode-analysis.yml +++ b/.github/workflows/veracode-analysis.yml @@ -32,7 +32,7 @@ on: jobs: routing: name: Check before analysis - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 outputs: development_stage: ${{ steps.routing-mode.outputs.development_stage }} skip_analysis: ${{ steps.routing-mode.outputs.skip_analysis }} @@ -169,7 +169,7 @@ jobs: name: Sandbox scan needs: [routing, build] if: needs.routing.outputs.development_stage != 'Development' - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: Promote latest scan diff --git a/.github/workflows/windows-agent.yml b/.github/workflows/windows-agent.yml index a57afbca347..67700206827 100644 --- a/.github/workflows/windows-agent.yml +++ b/.github/workflows/windows-agent.yml @@ -27,13 +27,13 @@ on: - vcpkg.json jobs: - get-version: - uses: ./.github/workflows/get-version.yml + get-environment: + uses: ./.github/workflows/get-environment.yml with: version_file: CMakeLists.txt build-and-test-agent: - needs: [get-version] + needs: [get-environment] runs-on: windows-latest env: AWS_ACCESS_KEY_ID: ${{ secrets.COLLECT_S3_ACCESS_KEY }} @@ -71,7 +71,7 @@ jobs: - name: Upload package artifacts if: | github.event_name != 'workflow_dispatch' && - contains(fromJson('["stable"]'), needs.get-version.outputs.stability) && + contains(fromJson('["stable"]'), needs.get-environment.outputs.stability) && ! cancelled() && ! contains(needs.*.result, 'failure') && ! contains(needs.*.result, 'cancelled') diff --git a/.version b/.version index f2436a2f8cd..be9d7ead1f6 100644 --- a/.version +++ b/.version @@ -1,2 +1,2 @@ MAJOR=24.04 -MINOR=6 +MINOR=7 diff --git a/CMakeLists.txt b/CMakeLists.txt index 9823a55dc01..bd1b441cd9e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -110,7 +110,7 @@ endif() # Version. 
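
The version bump above has to stay consistent in two places: .version moves MINOR from 6 to 7, and CMakeLists.txt just below bumps COLLECT_PATCH the same way. A small Python sketch of a consistency check one could run in CI (the two file formats are real, the checking script itself is hypothetical):

import re

def read_dot_version(text):
    """Parse MAJOR=24.04 / MINOR=7 lines into ('24.04', '7')."""
    fields = dict(re.findall(r"^(MAJOR|MINOR)=(\S+)$", text, re.M))
    return fields["MAJOR"], fields["MINOR"]

def read_cmake_patch(text):
    return re.search(r"set\(COLLECT_PATCH (\d+)\)", text).group(1)

dot_version = "MAJOR=24.04\nMINOR=7\n"
cmake = "set(COLLECT_MAJOR 24)\nset(COLLECT_MINOR 04)\nset(COLLECT_PATCH 7)\n"
major, minor = read_dot_version(dot_version)
assert minor == read_cmake_patch(cmake), "patch level drifted between the two files"
print(f"collect version: {major}.{minor}")  # -> collect version: 24.04.7
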
set(COLLECT_MAJOR 24) set(COLLECT_MINOR 04) -set(COLLECT_PATCH 6) +set(COLLECT_PATCH 7) set(COLLECT_VERSION "${COLLECT_MAJOR}.${COLLECT_MINOR}.${COLLECT_PATCH}") diff --git a/CMakeListsWindows.txt b/CMakeListsWindows.txt index 88352ea1e1f..186c32f7760 100644 --- a/CMakeListsWindows.txt +++ b/CMakeListsWindows.txt @@ -16,15 +16,14 @@ # For more information : contact@centreon.com # -# When we build from cache (CI), we don't use vcpkg cmaketool, so we tell to cmake where to find packages info +# When we build from cache (CI), we do not use the vcpkg cmake toolchain, so we tell CMake where to find package information if (BUILD_FROM_CACHE) LIST(APPEND CMAKE_PREFIX_PATH "build_windows/vcpkg_installed/x64-windows") endif() -#in order to make fmt compile +# In order to make fmt compile add_definitions("/utf-8") - find_package(fmt CONFIG REQUIRED) find_package(spdlog CONFIG REQUIRED) find_package(gRPC CONFIG REQUIRED) diff --git a/bbdo/bam.proto b/bbdo/bam.proto index a51e7fd7bdf..50e748158bf 100644 --- a/bbdo/bam.proto +++ b/bbdo/bam.proto @@ -29,14 +29,14 @@ enum State { UNKNOWN = 3; } -/*io::bam, bam::de_pb_inherited_downtime*/ +/* io::bam, bam::de_pb_inherited_downtime, 31 */ message InheritedDowntime { BBDOHeader header = 1; uint32 ba_id = 2; bool in_downtime = 3; } -/*io::bam, bam::de_pb_ba_status*/ +/* io::bam, bam::de_pb_ba_status, 32 */ message BaStatus { uint32 ba_id = 1; bool in_downtime = 2; @@ -49,7 +49,7 @@ message BaStatus { string output = 9; } -/*io::bam, bam::de_pb_ba_event*/ +/* io::bam, bam::de_pb_ba_event, 33 */ message BaEvent { uint32 ba_id = 1; double first_level = 2; @@ -59,7 +59,7 @@ message BaEvent { State status = 6; } -/*io::bam, bam::de_pb_kpi_event*/ +/* io::bam, bam::de_pb_kpi_event, 34 */ message KpiEvent { uint32 ba_id = 1; uint64 start_time = 2; @@ -72,20 +72,20 @@ message KpiEvent { State status = 9; } -/*io::bam, bam::de_pb_dimension_bv_event*/ +/* io::bam, bam::de_pb_dimension_bv_event, 35 */ message DimensionBvEvent { uint32 bv_id = 1; string bv_name = 2; string bv_description = 3; } -/*io::bam, bam::de_pb_dimension_ba_bv_relation_event*/ +/* io::bam, bam::de_pb_dimension_ba_bv_relation_event, 36 */ message DimensionBaBvRelationEvent { uint32 ba_id = 1; uint32 bv_id = 2; } -/*io::bam, bam::de_pb_dimension_timeperiod*/ +/* io::bam, bam::de_pb_dimension_timeperiod, 37 */ message DimensionTimeperiod { uint32 id = 1; string name = 2; @@ -98,7 +98,7 @@ message DimensionTimeperiod { string sunday = 9; } -/*io::bam, bam::de_pb_dimension_ba_event*/ +/* io::bam, bam::de_pb_dimension_ba_event, 38 */ message DimensionBaEvent { uint32 ba_id = 1; string ba_name = 2; @@ -109,7 +109,7 @@ message DimensionBaEvent { uint32 sla_duration_warn = 7; } -/*io::bam, bam::de_pb_dimension_kpi_event*/ +/* io::bam, bam::de_pb_dimension_kpi_event, 39 */ message DimensionKpiEvent { uint32 kpi_id = 1; uint32 ba_id = 2; @@ -129,7 +129,7 @@ message DimensionKpiEvent { double impact_unknown = 16; } -/*io::bam, bam::de_pb_kpi_status*/ +/* io::bam, bam::de_pb_kpi_status, 40 */ message KpiStatus { uint32 kpi_id = 1; bool in_downtime = 2; @@ -146,7 +146,7 @@ message KpiStatus { bool valid = 13; } -/*io::bam, bam::de_pb_ba_duration_event*/ +/* io::bam, bam::de_pb_ba_duration_event, 41 */ message BaDurationEvent { uint32 ba_id = 1; int64 real_start_time = 2; @@ -158,14 +158,14 @@ message BaDurationEvent { bool timeperiod_is_default = 8; } -/*io::bam, bam::de_pb_dimension_ba_timeperiod_relation*/ +/* io::bam, bam::de_pb_dimension_ba_timeperiod_relation, 42 */ message DimensionBaTimeperiodRelation { uint32 ba_id = 
1; uint32 timeperiod_id = 2; bool is_default = 3; } -/*io::bam, bam::de_pb_dimension_truncate_table_signal*/ +/* io::bam, bam::de_pb_dimension_truncate_table_signal, 43 */ message DimensionTruncateTableSignal { bool update_started = 1; } diff --git a/bbdo/bam_state.proto b/bbdo/bam_state.proto index 19a567c4f58..5db522dcfb6 100644 --- a/bbdo/bam_state.proto +++ b/bbdo/bam_state.proto @@ -26,6 +26,7 @@ package com.centreon.broker; * @brief Stores needed information for bool_service or kpi_service. Very useful * when broker is stopped to save the BA's states. */ +/* Ignore */ message ServiceState { uint64 host_id = 1; uint64 service_id = 2; @@ -40,7 +41,7 @@ message ServiceState { * @brief This message contains the living informations of the current BA's. * Thanks to them we can recompute their states when Broker is restarted. */ -/*io::bam, bam::de_pb_services_book_state*/ +/* io::bam, bam::de_pb_services_book_state, 48 */ message ServicesBookState { repeated ServiceState service = 1; } diff --git a/bbdo/bbdo.proto b/bbdo/bbdo.proto index a9823ccaf81..ee3ffa21a07 100644 --- a/bbdo/bbdo.proto +++ b/bbdo/bbdo.proto @@ -20,14 +20,14 @@ syntax = "proto3"; package com.centreon.broker; - +/* Ignore */ message Bbdo { uint32 major = 1; uint32 minor = 2; uint32 patch = 3; } -/*io::bbdo, bbdo::de_welcome*/ +/* io::bbdo, bbdo::de_welcome, 49 */ message Welcome { Bbdo version = 1; string extensions = 2; @@ -35,12 +35,12 @@ message Welcome { string poller_name = 4; } -/*io::bbdo, bbdo::de_pb_ack*/ +/* io::bbdo, bbdo::de_pb_ack, 50 */ message Ack { uint32 acknowledged_events = 1; } -/*io::bbdo, bbdo::de_pb_stop*/ +/* io::bbdo, bbdo::de_pb_stop, 51 */ message Stop { uint64 poller_id = 1; } diff --git a/bbdo/extcmd.proto b/bbdo/extcmd.proto index f0cadaf0397..3a5bdf186c9 100644 --- a/bbdo/extcmd.proto +++ b/bbdo/extcmd.proto @@ -23,7 +23,7 @@ import "google/protobuf/timestamp.proto"; package com.centreon.broker; - +/* Ignore */ message TimePoint { string name = 1; string function = 2; @@ -36,7 +36,7 @@ message TimePoint { * from engine to dest with several timepoints (one per muxer) * */ -/*io::extcmd, extcmd::de_pb_bench*/ +/* io::extcmd, extcmd::de_pb_bench, 3 */ message Bench { uint32 id = 1; repeated TimePoint points = 2; diff --git a/bbdo/neb.proto b/bbdo/neb.proto index 0f3b73c2e42..ab06297195c 100644 --- a/bbdo/neb.proto +++ b/bbdo/neb.proto @@ -41,7 +41,7 @@ enum AckType { STICKY = 2; } -/*io::neb, neb::de_pb_service*/ +/* io::neb, neb::de_pb_service, 5 */ message Service { uint64 host_id = 1; uint64 service_id = 2; @@ -159,7 +159,7 @@ message Service { /** * @brief Message sent in BBDO 3.0.0 instead of neb::service_status */ -/*io::neb, neb::de_pb_service_status*/ +/* io::neb, neb::de_pb_service_status, 6 */ message ServiceStatus { uint64 host_id = 1; uint64 service_id = 2; @@ -226,7 +226,7 @@ message ServiceStatus { * * Only used with BBDO 3.0 */ -/*io::neb, neb::de_pb_adaptive_service*/ +/* io::neb, neb::de_pb_adaptive_service, 7 */ message AdaptiveService { uint64 host_id = 1; uint64 service_id = 2; @@ -248,7 +248,7 @@ message AdaptiveService { optional string notification_period = 17; } -/*io::neb, neb::de_pb_host*/ +/* io::neb, neb::de_pb_host, 8 */ message Host { uint64 host_id = 1; @@ -355,7 +355,7 @@ message Host { /** * @brief Message sent in BBDO 3.0.0 instead of neb::service_status */ -/*io::neb, neb::de_pb_host_status*/ +/* io::neb, neb::de_pb_host_status, 9 */ message HostStatus { uint64 host_id = 1; @@ -412,7 +412,7 @@ message HostStatus { * * Only used with BBDO 3.0 */ -/*io::neb, 
neb::de_pb_adaptive_host*/ +/* io::neb, neb::de_pb_adaptive_host, 10 */ message AdaptiveHost { uint64 host_id = 1; @@ -433,7 +433,7 @@ message AdaptiveHost { optional string notification_period = 16; } -/*io::neb, neb::de_pb_comment*/ +/* io::neb, neb::de_pb_comment, 11 */ message Comment { BBDOHeader header = 1; @@ -476,7 +476,7 @@ message Comment { * @brief A downtime is applied on a resource when we don't want notifications * concerning bad states on this resource. */ -/*io::neb, neb::de_pb_downtime*/ +/* io::neb, neb::de_pb_downtime, 12 */ message Downtime { enum DowntimeType { NOT_USED = 0; @@ -504,7 +504,7 @@ message Downtime { bool fixed = 18; } -/*io::neb, neb::de_pb_custom_variable*/ +/* io::neb, neb::de_pb_custom_variable, 13 */ message CustomVariable { enum VarType { HOST = 0; @@ -529,7 +529,7 @@ enum CheckType { CheckPassive = 1; } -/*io::neb, neb::de_pb_host_check*/ +/* io::neb, neb::de_pb_host_check, 14 */ message HostCheck { BBDOHeader header = 1; @@ -540,7 +540,7 @@ message HostCheck { uint64 next_check = 6; } -/*io::neb, neb::de_pb_service_check*/ +/* io::neb, neb::de_pb_service_check, 15 */ message ServiceCheck { BBDOHeader header = 1; @@ -552,7 +552,7 @@ message ServiceCheck { uint64 service_id = 7; } -/*io::neb, neb::de_pb_log_entry*/ +/* io::neb, neb::de_pb_log_entry, 16 */ message LogEntry { enum LogType { SOFT = 0; @@ -590,7 +590,7 @@ message LogEntry { int32 retry = 13; } -/*io::neb, neb::de_pb_instance_status*/ +/* io::neb, neb::de_pb_instance_status, 17 */ message InstanceStatus { BBDOHeader header = 1; @@ -612,7 +612,7 @@ message InstanceStatus { uint64 instance_id = 17; } -/*io::neb, neb::de_pb_instance*/ +/* io::neb, neb::de_pb_instance, 18 */ message Instance { BBDOHeader header = 1; @@ -626,7 +626,7 @@ message Instance { string version = 9; } -/*io::neb, neb::de_pb_responsive_instance*/ +/* io::neb, neb::de_pb_responsive_instance, 19 */ message ResponsiveInstance { BBDOHeader header = 1; @@ -634,7 +634,7 @@ message ResponsiveInstance { bool responsive = 3; } -/*io::neb, neb::de_pb_acknowledgement*/ +/* io::neb, neb::de_pb_acknowledgement, 20 */ message Acknowledgement { uint64 host_id = 1; uint64 service_id = 2; @@ -654,7 +654,7 @@ message Acknowledgement { uint32 state = 12; } -/*io::neb, neb::de_pb_host_dependency*/ +/* io::neb, neb::de_pb_host_dependency, 21 */ message HostDependency { BBDOHeader header = 1; @@ -667,7 +667,7 @@ message HostDependency { string notification_failure_options = 8; } -/*io::neb, neb::de_pb_service_dependency*/ +/* io::neb, neb::de_pb_service_dependency, 22 */ message ServiceDependency { BBDOHeader header = 1; @@ -682,7 +682,7 @@ message ServiceDependency { uint64 service_id = 10; } -/*io::neb, neb::de_pb_host_group*/ +/* io::neb, neb::de_pb_host_group, 23 */ message HostGroup { BBDOHeader header = 1; @@ -692,7 +692,7 @@ message HostGroup { uint64 poller_id = 5; } -/*io::neb, neb::de_pb_service_group*/ +/* io::neb, neb::de_pb_service_group, 24 */ message ServiceGroup { BBDOHeader header = 1; @@ -702,7 +702,7 @@ message ServiceGroup { uint64 poller_id = 5; } -/*io::neb, neb::de_pb_host_group_member*/ +/* io::neb, neb::de_pb_host_group_member, 25 */ message HostGroupMember { BBDOHeader header = 1; @@ -713,7 +713,7 @@ message HostGroupMember { uint64 poller_id = 6; } -/*io::neb, neb::de_pb_service_group_member*/ +/* io::neb, neb::de_pb_service_group_member, 26 */ message ServiceGroupMember { BBDOHeader header = 1; @@ -725,7 +725,7 @@ message ServiceGroupMember { uint64 service_id = 7; } -/*io::neb, neb::de_pb_host_parent*/ +/* 
io::neb, neb::de_pb_host_parent, 27 */ message HostParent { BBDOHeader header = 1; @@ -734,7 +734,7 @@ message HostParent { uint64 parent_id = 4; } -/*io::neb, neb::de_pb_instance_configuration*/ +/* io::neb, neb::de_pb_instance_configuration, 28 */ message InstanceConfiguration { BBDOHeader header = 1; bool loaded = 2; diff --git a/bbdo/rebuild_message.proto b/bbdo/rebuild_message.proto index afc1e410747..7717833bcf2 100644 --- a/bbdo/rebuild_message.proto +++ b/bbdo/rebuild_message.proto @@ -1,13 +1,13 @@ syntax = "proto3"; package com.centreon.broker; - +/* Ignore */ message Point { int64 ctime = 1; double value = 2; uint32 status = 3; } - +/* Ignore */ message Timeserie { repeated Point pts = 1; int32 data_source_type = 2; @@ -15,7 +15,7 @@ message Timeserie { uint32 rrd_retention = 4; } -/*io::storage, storage::de_rebuild_message*/ +/* io::storage, storage::de_rebuild_message, 2 */ message RebuildMessage { enum State { START = 0; diff --git a/bbdo/remove_graph_message.proto b/bbdo/remove_graph_message.proto index 8c35c876890..0b0ef5538a6 100644 --- a/bbdo/remove_graph_message.proto +++ b/bbdo/remove_graph_message.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package com.centreon.broker; -/*io::storage, storage::de_remove_graph_message*/ +/* io::storage, storage::de_remove_graph_message, 4 */ message RemoveGraphMessage { repeated uint64 index_ids = 1; repeated uint64 metric_ids = 2; diff --git a/bbdo/severity.proto b/bbdo/severity.proto index 44781d3af1c..4313ad599f2 100644 --- a/bbdo/severity.proto +++ b/bbdo/severity.proto @@ -35,7 +35,7 @@ package com.centreon.broker; * relations between resources of our poller and this severity. And only if the * severity is no more used at all, we can remove it. */ -/*io::neb, neb::de_pb_severity*/ +/*io::neb, neb::de_pb_severity, 29 */ message Severity { uint64 id = 1; enum Action { diff --git a/bbdo/storage.proto b/bbdo/storage.proto index a0544da87bd..0be01e3b349 100644 --- a/bbdo/storage.proto +++ b/bbdo/storage.proto @@ -20,7 +20,7 @@ syntax = "proto3"; package com.centreon.broker; -/*io::storage, storage::de_pb_metric*/ +/* io::storage, storage::de_pb_metric, 44 */ message Metric { enum ValueType { GAUGE = 0; @@ -40,7 +40,7 @@ message Metric { uint64 service_id = 12; } -/*io::storage, storage::de_pb_status*/ +/* io::storage, storage::de_pb_status, 45 */ message Status { uint64 index_id = 1; uint32 interval = 2; @@ -51,14 +51,14 @@ message Status { uint64 service_id = 7; } -/*io::storage, storage::de_pb_index_mapping*/ +/* io::storage, storage::de_pb_index_mapping, 46 */ message IndexMapping { uint64 index_id = 1; uint64 host_id = 2; uint64 service_id = 3; } -/*io::storage, storage::de_pb_metric_mapping*/ +/* io::storage, storage::de_pb_metric_mapping, 47 */ message MetricMapping { uint64 index_id = 1; uint64 metric_id = 2; diff --git a/bbdo/tag.proto b/bbdo/tag.proto index d12898d8470..ce5b945dc7e 100644 --- a/bbdo/tag.proto +++ b/bbdo/tag.proto @@ -41,7 +41,7 @@ enum TagType { HOSTCATEGORY = 3; } -/*io::neb, neb::de_pb_tag*/ +/* io::neb, neb::de_pb_tag, 30 */ message Tag { uint64 id = 1; enum Action { @@ -55,7 +55,7 @@ message Tag { string name = 4; int64 poller_id = 5; } - +/* Ignore */ message TagInfo { uint64 id = 1; TagType type = 2; diff --git a/broker/core/sql/src/mysql_connection.cc b/broker/core/sql/src/mysql_connection.cc index 5c6d2548bba..0951a439b21 100644 --- a/broker/core/sql/src/mysql_connection.cc +++ b/broker/core/sql/src/mysql_connection.cc @@ -16,6 +16,7 @@ * For more information : contact@centreon.com */ #include +#include 
#include "com/centreon/broker/config/applier/init.hh" #include "com/centreon/broker/misc/misc.hh" @@ -460,18 +461,26 @@ void mysql_connection::_statement(mysql_task* t) { "mysql_connection {:p}: execute statement {:x} attempt {}: {}", static_cast(this), task->statement_id, attempts, query); if (mysql_stmt_execute(stmt)) { - std::string err_msg( - fmt::format("{} errno={} {}", mysql_error::msg[task->error_code], - ::mysql_errno(_conn), ::mysql_stmt_error(stmt))); - SPDLOG_LOGGER_ERROR(_logger, - "connection fail to execute statement {:p}: {}", - static_cast(this), err_msg); - if (_server_error(::mysql_stmt_errno(stmt))) { + int32_t err_code = ::mysql_stmt_errno(stmt); + std::string err_msg(fmt::format("{} errno={} {}", + mysql_error::msg[task->error_code], + err_code, ::mysql_stmt_error(stmt))); + if (err_code == 0) { + SPDLOG_LOGGER_TRACE(_logger, + "mysql_connection: errno=0, so we simulate a " + "server error CR_SERVER_LOST"); + err_code = CR_SERVER_LOST; + } else { + SPDLOG_LOGGER_ERROR(_logger, + "connection fail to execute statement {:p}: {}", + static_cast(this), err_msg); + } + if (_server_error(err_code)) { set_error_message(err_msg); break; } - if (mysql_stmt_errno(stmt) != 1213 && - mysql_stmt_errno(stmt) != 1205) // Dead Lock error + if (err_code != ER_LOCK_DEADLOCK && + err_code != ER_LOCK_WAIT_TIMEOUT) // Dead Lock error attempts = MAX_ATTEMPTS; if (mysql_commit(_conn)) { diff --git a/broker/core/sql/src/mysql_multi_insert.cc b/broker/core/sql/src/mysql_multi_insert.cc index cafc020e386..7d375cb82cd 100644 --- a/broker/core/sql/src/mysql_multi_insert.cc +++ b/broker/core/sql/src/mysql_multi_insert.cc @@ -132,7 +132,11 @@ void bulk_or_multi::execute(mysql& connexion, my_error::code ec, int thread_id) { if (_bulk_stmt) { - if (!_bulk_bind->empty()) { + /* If the database connection is lost, we can have this issue */ + if (!_bulk_bind) { + _bulk_bind = _bulk_stmt->create_bind(); + _bulk_bind->reserve(_bulk_row); + } else if (!_bulk_bind->empty()) { _bulk_stmt->set_bind(std::move(_bulk_bind)); connexion.run_statement(*_bulk_stmt, ec, thread_id); _bulk_bind = _bulk_stmt->create_bind(); diff --git a/broker/grpc/generate_proto.py b/broker/grpc/generate_proto.py index d681e223b9a..c544b07605f 100755 --- a/broker/grpc/generate_proto.py +++ b/broker/grpc/generate_proto.py @@ -21,6 +21,7 @@ from os.path import isfile, join import re import argparse +import sys file_begin_content = """syntax = "proto3"; @@ -32,7 +33,6 @@ message CentreonEvent { oneof content { bytes buffer = 1; - """ cc_file_begin_content = """/** @@ -177,45 +177,66 @@ class received_protobuf : public io::protobuf { args = parser.parse_args() message_parser = r'^message\s+(\w+)\s+\{' -io_protobuf_parser = r'\/\*\s*(\w+::\w+\s*,\s*\w+::\w+)\s*\*\/' +io_protobuf_parser = r'\/\*\s*(\w+::\w+\s*,\s*\w+::\w+)\s*,\s*(\d+)\s*\*\/' +ignore_message = "/* Ignore */" one_of_index = 2 +message_save = [] for directory in args.proto_directory: proto_files = [f for f in listdir( directory) if f[-6:] == ".proto" and isfile(join(directory, f))] for file in proto_files: + line_counter = 0 + flag_ignore = False with open(join(directory, file)) as proto_file: messages = [] io_protobuf_match = None for line in proto_file.readlines(): + line_counter += 1 m = re.match(message_parser, line) if m is not None and io_protobuf_match is not None: - messages.append([m.group(1), io_protobuf_match.group(1)]) + messages.append([m.group(1), io_protobuf_match.group(1), io_protobuf_match.group(2)]) io_protobuf_match = None + flag_ignore = True else: 
io_protobuf_match = re.match(io_protobuf_parser, line) - if len(messages) > 0: + # a /* Ignore */ comment flags the following message as intentionally id-less + if ignore_message in line: + flag_ignore = True + # at a message declaration the flag must be set, either by an id comment or by /* Ignore */ + if flag_ignore and m is not None: + flag_ignore = False + elif not flag_ignore and m is not None: + print(f"generate_proto.py: Error: Message {{ {m.group(1)} }} has no protobuf id and is missing the /* Ignore */ comment: file {file}:{line_counter}", file=sys.stderr) + print("Error: add /* Ignore */ or a protobuf id, for example: /* io::bam, bam::de_pb_services_book_state, 48 */", file=sys.stderr) + exit(1) + + if len(messages) > 0: file_begin_content += f"import \"{file}\";\n" - for mess, id in messages: - # proto file - file_message_centreon_event += f" {mess} {mess}_ = {one_of_index};\n" - one_of_index += 1 - lower_mess = mess.lower() - # cc file - cc_file_protobuf_to_event_function += f""" case ::stream::CentreonEvent::k{mess}: - return std::make_shared>( - stream_content, &grpc_event_type::{lower_mess}_, - &grpc_event_type::mutable_{lower_mess}_); + message_save += messages +# sort the messages by their protobuf index (io_protobuf_match.group(2)) +message_save.sort(key=lambda x: int(x[2])) +for mess, id, index in message_save: + # proto file + file_message_centreon_event += f" {mess} {mess}_ = {index};\n" + # keep counting indexes: needed for opentelemetry + one_of_index += 1 + lower_mess = mess.lower() + # cc file + cc_file_protobuf_to_event_function += f""" case ::stream::CentreonEvent::k{mess}: +return std::make_shared>( + stream_content, &grpc_event_type::{lower_mess}_, + &grpc_event_type::mutable_{lower_mess}_); """ - cc_file_create_event_with_data_function += f""" case make_type({id}): - ret = std::make_shared( - event, reinterpret_cast( - &grpc_event_type::release_{lower_mess}_)); - ret->grpc_event.set_allocated_{lower_mess}_(&std::static_pointer_cast>(event)->mut_obj()); - break; + cc_file_create_event_with_data_function += f""" case make_type({id}): + ret = std::make_shared( + event, reinterpret_cast( + &grpc_event_type::release_{lower_mess}_)); + ret->grpc_event.set_allocated_{lower_mess}_(&std::static_pointer_cast>(event)->mut_obj()); + break; """ diff --git a/broker/lua/inc/com/centreon/broker/lua/macro_cache.hh b/broker/lua/inc/com/centreon/broker/lua/macro_cache.hh index 616f2afe377..d9f04f48fe2 100644 --- a/broker/lua/inc/com/centreon/broker/lua/macro_cache.hh +++ b/broker/lua/inc/com/centreon/broker/lua/macro_cache.hh @@ -27,6 +27,7 @@ #include "com/centreon/broker/neb/host_group.hh" #include "com/centreon/broker/neb/host_group_member.hh" #include "com/centreon/broker/neb/instance.hh" +#include "com/centreon/broker/neb/internal.hh" #include "com/centreon/broker/neb/service.hh" #include "com/centreon/broker/neb/service_group.hh" #include "com/centreon/broker/neb/service_group_member.hh" @@ -42,14 +43,26 @@ class macro_cache { std::shared_ptr _cache; absl::flat_hash_map> _instances; absl::flat_hash_map> _hosts; - absl::flat_hash_map> _host_groups; + /* The host group cache also stores the set of pollers that need each + * entry. So when no poller needs a host group anymore, we can remove it + * from the cache. 
*/ + absl::flat_hash_map, + absl::flat_hash_set>> + _host_groups; absl::btree_map, std::shared_ptr> _host_group_members; absl::flat_hash_map, std::shared_ptr> _custom_vars; absl::flat_hash_map, std::shared_ptr> _services; - absl::flat_hash_map> _service_groups; + /* The service group cache also stores the set of pollers that need each + * entry. So when no poller needs a service group anymore, we can remove + * it from the cache. */ + absl::flat_hash_map, + absl::flat_hash_set>> + _service_groups; absl::btree_map, std::shared_ptr> _service_group_members; diff --git a/broker/lua/src/broker_utils.cc b/broker/lua/src/broker_utils.cc index 01e8a30fec1..054733ba992 100644 --- a/broker/lua/src/broker_utils.cc +++ b/broker/lua/src/broker_utils.cc @@ -24,7 +24,7 @@ #include "absl/strings/string_view.h" #include "com/centreon/broker/config/applier/state.hh" -#include <openssl/md5.h> +#include <openssl/evp.h> #include #include #include @@ -810,6 +810,34 @@ static int l_broker_stat(lua_State* L) { } } +static void md5_message(const unsigned char* message, + size_t message_len, + unsigned char** digest, + unsigned int* digest_len) { + EVP_MD_CTX* mdctx; + auto handle_error = [](const std::string& msg) { + auto logger = log_v2::instance().get(log_v2::LUA); + logger->error(msg); + }; + if ((mdctx = EVP_MD_CTX_new()) == nullptr) { + handle_error("lua: fail to call MD5 (EVP_MD_CTX_new call)"); + } + if (1 != EVP_DigestInit_ex(mdctx, EVP_md5(), nullptr)) { + handle_error("lua: fail to call MD5 (EVP_DigestInit_ex call)"); + } + if (1 != EVP_DigestUpdate(mdctx, message, message_len)) { + handle_error("lua: fail to call MD5 (EVP_DigestUpdate call)"); + } + if ((*digest = (unsigned char*)OPENSSL_malloc(EVP_MD_size(EVP_md5()))) == + nullptr) { + handle_error("lua: fail to call MD5 (OPENSSL_malloc call)"); + } + if (1 != EVP_DigestFinal_ex(mdctx, *digest, digest_len)) { + handle_error("lua: fail to call MD5 (EVP_DigestFinal_ex call)"); + } + EVP_MD_CTX_free(mdctx); +} + static int l_broker_md5(lua_State* L) { auto digit = [](unsigned char d) -> char { if (d < 10) @@ -820,11 +848,12 @@ static int l_broker_md5(lua_State* L) { size_t len; const unsigned char* str = reinterpret_cast(lua_tolstring(L, -1, &len)); - unsigned char md5[MD5_DIGEST_LENGTH]; - MD5(str, len, md5); - char result[2 * MD5_DIGEST_LENGTH + 1]; + unsigned char* md5; + uint32_t md5_len; + md5_message(str, len, &md5, &md5_len); + char result[2 * md5_len + 1]; char* tmp = result; - for (int i = 0; i < MD5_DIGEST_LENGTH; i++) { + for (uint32_t i = 0; i < md5_len; i++) { *tmp = digit(md5[i] >> 4); ++tmp; *tmp = digit(md5[i] & 0xf); @@ -832,6 +861,7 @@ } *tmp = 0; lua_pushstring(L, result); + OPENSSL_free(md5); return 1; } diff --git a/broker/lua/src/macro_cache.cc b/broker/lua/src/macro_cache.cc index 0c848b5d092..1c92d24cb31 100644 --- a/broker/lua/src/macro_cache.cc +++ b/broker/lua/src/macro_cache.cc @@ -1,5 +1,5 @@ /** - * Copyright 2017-2022 Centreon + * Copyright 2017-2024 Centreon * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
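
Stepping back to the broker_utils.cc hunk above: l_broker_md5 now goes through OpenSSL's EVP digest interface, which returns the digest length at runtime instead of relying on the MD5_DIGEST_LENGTH constant, and the digest is then hex-encoded nibble by nibble. That encoding is easy to sanity-check against Python's hashlib (a reference sketch, not the C++ code itself):

import hashlib

# Reference for the nibble-by-nibble hex encoding done in l_broker_md5.
def digit(d):
    return chr(ord('0') + d) if d < 10 else chr(ord('a') + d - 10)

def md5_hex(data):
    md5 = hashlib.md5(data).digest()  # 16 bytes, like EVP_DigestFinal_ex
    return "".join(digit(b >> 4) + digit(b & 0xF) for b in md5)

assert md5_hex(b"centreon") == hashlib.md5(b"centreon").hexdigest()
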
@@ -17,11 +17,14 @@ */ #include "com/centreon/broker/lua/macro_cache.hh" +#include +#include #include "bbdo/bam/dimension_ba_bv_relation_event.hh" #include "bbdo/bam/dimension_ba_event.hh" #include "bbdo/bam/dimension_bv_event.hh" #include "bbdo/storage/index_mapping.hh" #include "bbdo/storage/metric_mapping.hh" +#include "com/centreon/broker/neb/internal.hh" #include "com/centreon/exceptions/msg_fmt.hh" #include "common/log_v2/log_v2.hh" @@ -367,17 +370,15 @@ macro_cache::get_host_group_members() const { * * @return The name of the host group. */ -std::string const& macro_cache::get_host_group_name(uint64_t id) const { - auto const found = _host_groups.find(id); +const std::string& macro_cache::get_host_group_name(uint64_t id) const { + const auto found = _host_groups.find(id); - if (found == _host_groups.end()) + if (found == _host_groups.end()) { + _cache->logger()->error("lua: could not find information on host group {}", + id); throw msg_fmt("lua: could not find information on host group {}", id); - if (found->second->type() == neb::host_group::static_type()) - return std::static_pointer_cast(found->second)->name; - else - return std::static_pointer_cast(found->second) - ->obj() - .name(); + } + return found->second.first->obj().name(); } /** @@ -428,14 +429,12 @@ macro_cache::get_service_group_members() const { std::string const& macro_cache::get_service_group_name(uint64_t id) const { auto found = _service_groups.find(id); - if (found == _service_groups.end()) + if (found == _service_groups.end()) { + _cache->logger()->error( + "lua: could not find information on service group {}", id); throw msg_fmt("lua: could not find information on service group {}", id); - if (found->second->type() == neb::service_group::static_type()) - return std::static_pointer_cast(found->second)->name; - else - return std::static_pointer_cast(found->second) - ->obj() - .name(); + } + return found->second.first->obj().name(); } /** @@ -830,14 +829,42 @@ void macro_cache::_process_pb_adaptive_host( * @param data The event. */ void macro_cache::_process_host_group(std::shared_ptr const& data) { - std::shared_ptr const& hg = + const std::shared_ptr& hg = std::static_pointer_cast(data); SPDLOG_LOGGER_DEBUG(_cache->logger(), "lua: processing host group '{}' of id {} enabled: {}", hg->name, hg->id, hg->enabled); - if (hg->enabled) - _host_groups[hg->id] = data; - // erasure is desactivated because a group cen be owned by several pollers + if (hg->enabled) { + auto found = _host_groups.find(hg->id); + if (found != _host_groups.end()) { + /* here, we complete the set of pollers */ + found->second.second.insert(hg->poller_id); + found->second.first->mut_obj().set_name(hg->name); + } else { + /* Here, we add the hostgroup and the first poller that needs it */ + absl::flat_hash_set pollers{hg->poller_id}; + auto pb_hg = std::make_shared(); + auto& obj = pb_hg->mut_obj(); + obj.set_enabled(hg->enabled); + obj.set_hostgroup_id(hg->id); + obj.set_name(hg->name); + obj.set_poller_id(hg->poller_id); + _host_groups[hg->id] = std::make_pair(std::move(pb_hg), pollers); + } + } else { + /* We check that no more pollers need this host group. So if the set is + * empty, we can also remove the host group. 
*/ + auto found = _host_groups.find(hg->id); + if (found != _host_groups.end()) { + auto f = found->second.second.find(hg->poller_id); + if (f != found->second.second.end()) { + found->second.second.erase(f); + if (found->second.second.empty()) { + _host_groups.erase(found); + } + } + } + } } /** @@ -846,15 +873,39 @@ void macro_cache::_process_host_group(std::shared_ptr const& data) { * @param data The event. */ void macro_cache::_process_pb_host_group( - std::shared_ptr const& data) { - const HostGroup& hg = - std::static_pointer_cast(data)->obj(); + const std::shared_ptr& data) { + auto pb_hg = std::static_pointer_cast(data); + const HostGroup& hg = pb_hg->obj(); SPDLOG_LOGGER_DEBUG(_cache->logger(), "lua: processing pb host group '{}' of id {}, enabled {}", hg.name(), hg.hostgroup_id(), hg.enabled()); - if (hg.enabled()) - _host_groups[hg.hostgroup_id()] = data; - // erasure is desactivated because a group cen be owned by several pollers + if (hg.enabled()) { + auto found = _host_groups.find(hg.hostgroup_id()); + if (found != _host_groups.end()) { + found->second.second.insert(hg.poller_id()); + HostGroup& current_hg = + std::static_pointer_cast(found->second.first) + ->mut_obj(); + current_hg.set_name(hg.name()); + } else { + absl::flat_hash_set pollers{hg.poller_id()}; + _host_groups[hg.hostgroup_id()] = + std::make_pair(std::move(pb_hg), pollers); + } + } else { + /* We check that no more pollers need this host group. So if the set is + * empty, we can also remove the host group. */ + auto found = _host_groups.find(hg.hostgroup_id()); + if (found != _host_groups.end()) { + auto f = found->second.second.find(hg.poller_id()); + if (f != found->second.second.end()) { + found->second.second.erase(f); + if (found->second.second.empty()) { + _host_groups.erase(found); + } + } + } + } } /** @@ -1113,9 +1164,37 @@ void macro_cache::_process_service_group( SPDLOG_LOGGER_DEBUG(_cache->logger(), "lua: processing service group '{}' of id {}", sg->name, sg->id); - if (sg->enabled) - _service_groups[sg->id] = data; - // erasure is desactivated because a group cen be owned by several pollers + if (sg->enabled) { + auto found = _service_groups.find(sg->id); + if (found != _service_groups.end()) { + /* here, we complete the set of pollers */ + found->second.second.insert(sg->poller_id); + found->second.first->mut_obj().set_name(sg->name); + } else { + /* Here, we add the servicegroup and the first poller that needs it */ + absl::flat_hash_set pollers{sg->poller_id}; + auto pb_sg = std::make_shared(); + auto& obj = pb_sg->mut_obj(); + obj.set_servicegroup_id(sg->id); + obj.set_enabled(sg->enabled); + obj.set_name(sg->name); + obj.set_poller_id(sg->poller_id); + _service_groups[sg->id] = std::make_pair(std::move(pb_sg), pollers); + } + } else { + /* We check that no more pollers need this service group. So if the set is + * empty, we can also remove the service group. */ + auto found = _service_groups.find(sg->id); + if (found != _service_groups.end()) { + auto f = found->second.second.find(sg->poller_id); + if (f != found->second.second.end()) { + found->second.second.erase(f); + if (found->second.second.empty()) { + _service_groups.erase(found); + } + } + } + } } /** @@ -1124,15 +1203,38 @@ void macro_cache::_process_service_group( * @param sg The event. 
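
The pattern introduced in _process_host_group above, and mirrored for service groups below, is essentially a reference-counted cache entry: each group keeps the set of pollers that declared it, and the entry is erased only once that set empties, replacing the old behavior where erasure was disabled outright. A compact Python sketch of the bookkeeping, with deliberately simplified data shapes:

# Simplified model of the (group id -> (name, pollers)) bookkeeping above.
class GroupCache:
    def __init__(self):
        self._groups = {}  # id -> [name, set of poller ids]

    def process(self, group_id, name, poller_id, enabled):
        if enabled:
            entry = self._groups.setdefault(group_id, [name, set()])
            entry[0] = name          # the latest name wins
            entry[1].add(poller_id)  # this poller needs the group
        else:
            entry = self._groups.get(group_id)
            if entry:
                entry[1].discard(poller_id)
                if not entry[1]:     # no poller needs it anymore
                    del self._groups[group_id]

    def name(self, group_id):
        return self._groups[group_id][0]

cache = GroupCache()
cache.process(28, "centreon", poller_id=1, enabled=True)
cache.process(28, "centreon", poller_id=2, enabled=True)
cache.process(28, "centreon", poller_id=1, enabled=False)
assert cache.name(28) == "centreon"  # poller 2 still needs the group
cache.process(28, "centreon", poller_id=2, enabled=False)
assert 28 not in cache._groups       # last poller gone, entry erased
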
*/ void macro_cache::_process_pb_service_group( - std::shared_ptr const& data) { - const ServiceGroup& sg = - std::static_pointer_cast(data)->obj(); + const std::shared_ptr& data) { + auto pb_sg = std::static_pointer_cast(data); + const ServiceGroup& sg = pb_sg->obj(); SPDLOG_LOGGER_DEBUG(_cache->logger(), "lua: processing pb service group '{}' of id {}", sg.name(), sg.servicegroup_id()); - if (sg.enabled()) - _service_groups[sg.servicegroup_id()] = data; - // erasure is desactivated because a group cen be owned by several pollers + if (sg.enabled()) { + auto found = _service_groups.find(sg.servicegroup_id()); + if (found != _service_groups.end()) { + found->second.second.insert(sg.poller_id()); + ServiceGroup& current_sg = found->second.first->mut_obj(); + current_sg.set_name(sg.name()); + } else { + /* Here, we add the servicegroup and the first poller that needs it */ + absl::flat_hash_set pollers{sg.poller_id()}; + _service_groups[sg.servicegroup_id()] = + std::make_pair(std::move(pb_sg), pollers); + } + } else { + /* We check that no more pollers need this service group. So if the set is + * empty, we can also remove the service group. */ + auto found = _service_groups.find(sg.servicegroup_id()); + if (found != _service_groups.end()) { + auto f = found->second.second.find(sg.poller_id()); + if (f != found->second.second.end()) { + found->second.second.erase(f); + if (found->second.second.empty()) { + _service_groups.erase(found); + } + } + } + } } /** @@ -1166,12 +1268,12 @@ void macro_cache::_process_pb_service_group_member( std::shared_ptr const& data) { const ServiceGroupMember& sgm = std::static_pointer_cast(data)->obj(); - SPDLOG_LOGGER_DEBUG( - _cache->logger(), - "lua: processing pb service group member (group_name: {}, group_id: {}, " - "host_id: {}, service_id: {} enabled: {}", - sgm.name(), sgm.servicegroup_id(), sgm.host_id(), sgm.service_id(), - sgm.enabled()); + SPDLOG_LOGGER_DEBUG(_cache->logger(), + "lua: processing pb service group member (group_name: " + "{}, group_id: {}, " + "host_id: {}, service_id: {} enabled: {}", + sgm.name(), sgm.servicegroup_id(), sgm.host_id(), + sgm.service_id(), sgm.enabled()); if (sgm.enabled()) _service_group_members[std::make_tuple(sgm.host_id(), sgm.service_id(), sgm.servicegroup_id())] = data; @@ -1290,10 +1392,10 @@ void macro_cache::_process_dimension_ba_bv_relation_event( } else { auto const& rel = std::static_pointer_cast(data); - SPDLOG_LOGGER_DEBUG( - _cache->logger(), - "lua: processing dimension ba bv relation event (ba_id: {}, bv_id: {})", - rel->ba_id, rel->bv_id); + SPDLOG_LOGGER_DEBUG(_cache->logger(), + "lua: processing dimension ba bv relation event " + "(ba_id: {}, bv_id: {})", + rel->ba_id, rel->bv_id); auto pb_data(std::make_shared()); pb_data->mut_obj().set_ba_id(rel->ba_id); pb_data->mut_obj().set_bv_id(rel->bv_id); @@ -1372,11 +1474,11 @@ void macro_cache::_process_custom_variable( std::shared_ptr const& data) { auto const& cv = std::static_pointer_cast(data); if (cv->name == "CRITICALITY_LEVEL") { - SPDLOG_LOGGER_DEBUG( - _cache->logger(), - "lua: processing custom variable representing a criticality level for " - "host_id {} and service_id {} and level {}", - cv->host_id, cv->service_id, cv->value); + SPDLOG_LOGGER_DEBUG(_cache->logger(), + "lua: processing custom variable representing a " + "criticality level for " + "host_id {} and service_id {} and level {}", + cv->host_id, cv->service_id, cv->value); int32_t value = std::atoi(cv->value.c_str()); if (value) _custom_vars[{cv->host_id, cv->service_id}] = cv; @@ 
@@ -1427,8 +1529,13 @@ void macro_cache::_save_to_disk() {
   for (auto it(_hosts.begin()), end(_hosts.end()); it != end; ++it)
     _cache->add(it->second);
 
-  for (auto it(_host_groups.begin()), end(_host_groups.end()); it != end; ++it)
-    _cache->add(it->second);
+  for (auto it = _host_groups.begin(), end = _host_groups.end(); it != end;
+       ++it) {
+    for (auto poller_id : it->second.second) {
+      it->second.first->mut_obj().set_poller_id(poller_id);
+      _cache->add(it->second.first);
+    }
+  }
 
   for (auto it(_host_group_members.begin()), end(_host_group_members.end());
        it != end; ++it)
@@ -1437,9 +1544,13 @@ void macro_cache::_save_to_disk() {
   for (auto it(_services.begin()), end(_services.end()); it != end; ++it)
     _cache->add(it->second);
 
-  for (auto it(_service_groups.begin()), end(_service_groups.end()); it != end;
-       ++it)
-    _cache->add(it->second);
+  for (auto it = _service_groups.begin(), end = _service_groups.end();
+       it != end; ++it) {
+    for (auto poller_id : it->second.second) {
+      it->second.first->mut_obj().set_poller_id(poller_id);
+      _cache->add(it->second.first);
+    }
+  }
 
   for (auto it = _service_group_members.begin(),
             end = _service_group_members.end();
diff --git a/broker/lua/test/lua.cc b/broker/lua/test/lua.cc
index d002cf5956d..8268c693b3e 100644
--- a/broker/lua/test/lua.cc
+++ b/broker/lua/test/lua.cc
@@ -1442,6 +1442,7 @@ TEST_F(LuaTest, ServiceGroupCacheTestName) {
   auto sg{std::make_shared<neb::service_group>()};
   sg->id = 28;
   sg->name = "centreon";
+  sg->enabled = true;
   _cache->write(sg);
 
   CreateScript(filename,
diff --git a/broker/unified_sql/src/stream_sql.cc b/broker/unified_sql/src/stream_sql.cc
index edd91e546ed..adf4cbe0269 100644
--- a/broker/unified_sql/src/stream_sql.cc
+++ b/broker/unified_sql/src/stream_sql.cc
@@ -1664,7 +1664,7 @@ void stream::_process_pb_host_group_member(const std::shared_ptr<io::data>& d) {
   }
 
   std::string query = fmt::format(
-      "DELETE FROM hosts_hostgroup WHERE host_id={} and hostgroup_id = {}",
+      "DELETE FROM hosts_hostgroups WHERE host_id={} and hostgroup_id = {}",
       hgm.host_id(), hgm.hostgroup_id());
 
   _mysql.run_query(query, database::mysql_error::delete_host_group_member,
diff --git a/common/process/CMakeLists.txt b/common/process/CMakeLists.txt
index f79bbaaa657..22235468ac6 100644
--- a/common/process/CMakeLists.txt
+++ b/common/process/CMakeLists.txt
@@ -27,4 +27,8 @@ add_library(
 
 target_precompile_headers(centreon_process REUSE_FROM centreon_common)
 
+if(${CMAKE_SYSTEM_NAME} STREQUAL "Windows")
+  target_link_libraries(centreon_process INTERFACE Boost::process)
+endif()
+
 set_property(TARGET centreon_process PROPERTY POSITION_INDEPENDENT_CODE ON)
diff --git a/common/src/perfdata.cc b/common/src/perfdata.cc
index 80945b75950..0d6f5b89af3 100644
--- a/common/src/perfdata.cc
+++ b/common/src/perfdata.cc
@@ -265,18 +265,21 @@ std::list<perfdata> perfdata::parse_perfdata(
 
     /* The label is given by s and finishes at end */
     if (*end == ']') {
-      --end;
       if (strncmp(s, "a[", 2) == 0) {
         s += 2;
+        --end;
         p._value_type = perfdata::data_type::absolute;
       } else if (strncmp(s, "c[", 2) == 0) {
         s += 2;
+        --end;
         p._value_type = perfdata::data_type::counter;
       } else if (strncmp(s, "d[", 2) == 0) {
         s += 2;
+        --end;
         p._value_type = perfdata::data_type::derive;
       } else if (strncmp(s, "g[", 2) == 0) {
         s += 2;
+        --end;
         p._value_type = perfdata::data_type::gauge;
       }
     }
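Reviewer note on the perfdata.cc hunk above: moving `--end` inside the prefix branches means the trailing `]` is only consumed when it closes an `a[`, `c[`, `d[` or `g[` data-type wrapper; a quoted metric name that legitimately ends with `]`, such as `'xx[aa a aa]'`, keeps its bracket. A Python sketch of the fixed rule (hypothetical helper, not the real parser, which works in place on char pointers; the gauge default is assumed):

# Illustrative sketch only, not part of the patch.
_KINDS = {"a[": "absolute", "c[": "counter", "d[": "derive", "g[": "gauge"}


def extract_label(label: str):
    """`label` is the metric name with surrounding quotes already stripped."""
    if label.endswith("]"):
        for prefix, kind in _KINDS.items():
            if label.startswith(prefix):
                # a recognized data-type wrapper: strip it and its ']'
                return label[2:-1], kind
    # no wrapper: a trailing ']' belongs to the name itself
    return label, "gauge"


assert extract_label("d[aa a]") == ("aa a", "derive")
assert extract_label("xx[aa a aa]") == ("xx[aa a aa]", "gauge")

The two tests added below (PerfdataParser.ExtractPerfdataBrackets and string_utils.extractPerfdataBrackets) exercise exactly these bracket-terminated names.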
diff --git a/common/tests/perfdata_test.cc b/common/tests/perfdata_test.cc
index bab234f9522..c64d9fe623a 100644
--- a/common/tests/perfdata_test.cc
+++ b/common/tests/perfdata_test.cc
@@ -623,3 +623,18 @@ TEST_F(PerfdataParser, BadMetric1) {
     ++i;
   }
 }
+
+TEST_F(PerfdataParser, ExtractPerfdataBrackets) {
+  std::string perfdata(
+      "'xx[aa a aa]'=2;3;7;1;9 '[a aa]'=12;25;50;0;118 'aa a]'=28;13;54;0;80");
+  auto lst{common::perfdata::parse_perfdata(0, 0, perfdata.c_str(), _logger)};
+  auto it = lst.begin();
+  ASSERT_NE(it, lst.end());
+  ASSERT_EQ(it->name(), "xx[aa a aa]");
+  ++it;
+  ASSERT_NE(it, lst.end());
+  ASSERT_EQ(it->name(), "[a aa]");
+  ++it;
+  ASSERT_NE(it, lst.end());
+  ASSERT_EQ(it->name(), "aa a]");
+}
diff --git a/engine/enginerpc/CMakeLists.txt b/engine/enginerpc/CMakeLists.txt
index 80534a5d33e..ed3fc8e65fc 100644
--- a/engine/enginerpc/CMakeLists.txt
+++ b/engine/enginerpc/CMakeLists.txt
@@ -67,7 +67,7 @@ add_library(
   # Headers.
   "${INC_DIR}/engine_impl.hh"
   "${INC_DIR}/enginerpc.hh")
-add_dependencies(${ENGINERPC} centreon_common)
+add_dependencies(${ENGINERPC} centreon_common engine_rpc)
 
 target_precompile_headers(${ENGINERPC} PRIVATE precomp_inc/precomp.hh)
 
diff --git a/engine/modules/opentelemetry/CMakeLists.txt b/engine/modules/opentelemetry/CMakeLists.txt
index 2da7c0972ec..229e83ed809 100644
--- a/engine/modules/opentelemetry/CMakeLists.txt
+++ b/engine/modules/opentelemetry/CMakeLists.txt
@@ -34,7 +34,7 @@ foreach(name IN LISTS service_files)
     COMMAND ${Protobuf_PROTOC_EXECUTABLE}
     ARGS --plugin=protoc-gen-grpc=${GRPC_CPP_PLUGIN}
-    --proto_path=${CMAKE_SOURCE_DIR}/opentelemetry-proto 
+    --proto_path=${CMAKE_SOURCE_DIR}/opentelemetry-proto
     --grpc_out=${SRC_DIR} ${proto_file}
     VERBATIM
     WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
@@ -61,7 +61,7 @@ add_custom_command(
   WORKING_DIRECTORY ${CMAKE_BINARY_DIR})
 
 # mod_externalcmd target.
-add_library(opentelemetry SHARED 
+add_library(opentelemetry SHARED
 ${SRC_DIR}/centreon_agent/agent.grpc.pb.cc
 ${SRC_DIR}/centreon_agent/agent.pb.cc
 ${SRC_DIR}/centreon_agent/agent_check_result_builder.cc
@@ -86,25 +86,26 @@ ${SRC_DIR}/opentelemetry/proto/collector/metrics/v1/metrics_service.grpc.pb.cc
 target_precompile_headers(opentelemetry PRIVATE precomp_inc/precomp.hh)
 
 # set(EXTERNALCMD_MODULE "${EXTERNALCMD_MODULE}" PARENT_SCOPE)
-target_link_libraries(opentelemetry 
+target_link_libraries(opentelemetry
 spdlog::spdlog
 -L${Boost_LIBRARY_DIR_RELEASE}
 boost_program_options)
 
-add_dependencies(opentelemetry 
+add_dependencies(opentelemetry
 pb_open_telemetry_lib
+engine_rpc
 pb_neb_lib
 pb_tag_lib)
 
-target_include_directories(opentelemetry PRIVATE 
-  "${MODULE_DIR}/inc/com/centreon/engine/modules/opentelemetry" 
-  "${CMAKE_SOURCE_DIR}/bbdo" 
+target_include_directories(opentelemetry PRIVATE
+  "${MODULE_DIR}/inc/com/centreon/engine/modules/opentelemetry"
+  "${CMAKE_SOURCE_DIR}/bbdo"
   "${MODULE_DIR}/inc"
   ${CMAKE_SOURCE_DIR}/common/inc
-  ${CMAKE_SOURCE_DIR}/common/http/inc 
-  ${CMAKE_SOURCE_DIR}/common/grpc/inc 
+  ${CMAKE_SOURCE_DIR}/common/http/inc
+  ${CMAKE_SOURCE_DIR}/common/grpc/inc
   src
-  ${PROJECT_SOURCE_DIR}/enginerpc 
+  ${PROJECT_SOURCE_DIR}/enginerpc
   ${CMAKE_SOURCE_DIR}/common/src
 )
diff --git a/engine/tests/string/string.cc b/engine/tests/string/string.cc
index 3486ba7e15d..e0adeb7217d 100644
--- a/engine/tests/string/string.cc
+++ b/engine/tests/string/string.cc
@@ -62,6 +62,17 @@ TEST(string_utils, extractPerfdataGaugeDiff) {
             "d[aa a]=28;13;54;0;80");
 }
 
+TEST(string_utils, extractPerfdataBrackets) {
+  std::string perfdata(
+      "'xx[aa a aa]'=2;3;7;1;9 '[a aa]'=12;25;50;0;118 'aa a]'=28;13;54;0;80");
+  ASSERT_EQ(string::extract_perfdata(perfdata, "xx[aa a aa]"),
+            "'xx[aa a aa]'=2;3;7;1;9");
+  ASSERT_EQ(string::extract_perfdata(perfdata, "[a aa]"),
+            "'[a aa]'=12;25;50;0;118");
+  ASSERT_EQ(string::extract_perfdata(perfdata, "aa a]"),
+            "'aa a]'=28;13;54;0;80");
+}
+
 TEST(string_utils, removeThresholdsWithoutThresholds) {
   std::string perfdata("a=2V");
   ASSERT_EQ(string::remove_thresholds(perfdata), "a=2V");
diff --git a/gorgone/packaging/centreon-gorgone-centreon-config.yaml b/gorgone/packaging/centreon-gorgone-centreon-config.yaml
index fbf3e808d30..bdf8a469953 100644
--- a/gorgone/packaging/centreon-gorgone-centreon-config.yaml
+++ b/gorgone/packaging/centreon-gorgone-centreon-config.yaml
@@ -53,9 +53,11 @@ overrides:
   rpm:
     depends:
       - centreon-gorgone = ${VERSION}-${RELEASE}${DIST}
+      - centreon-common
   deb:
     depends:
       - centreon-gorgone (= ${VERSION}-${RELEASE}${DIST})
+      - centreon-common
     replaces:
       - centreon-gorgone (<< 24.04.0)
diff --git a/gorgone/packaging/centreon-gorgone.yaml b/gorgone/packaging/centreon-gorgone.yaml
index 8df55cd3ed5..a2a287d7004 100644
--- a/gorgone/packaging/centreon-gorgone.yaml
+++ b/gorgone/packaging/centreon-gorgone.yaml
@@ -151,7 +151,6 @@ scripts:
 overrides:
   rpm:
     depends:
-      - centreon-common
       - bzip2
      - perl-Libssh-Session >= 0.8
      - perl-CryptX
@@ -185,8 +184,7 @@ overrides:
       - tar
       - perl(lib)
   deb:
-    depends: # those dependencies are taken from centreon-gorgone/packaging/debian/control
-      - centreon-common
+    depends:
       - libdatetime-perl
       - libtime-parsedate-perl
       - libtry-tiny-perl
diff --git a/tests/broker-engine/services-and-bulk-stmt.robot b/tests/broker-engine/services-and-bulk-stmt.robot
index 5662fbdba12..8464aaf4eb7 100644
--- a/tests/broker-engine/services-and-bulk-stmt.robot
+++ b/tests/broker-engine/services-and-bulk-stmt.robot
@@ -35,6 +35,7 @@ EBBPS1
     Should Be True
     ...    ${result}
     ...    An Initial service state on host_1:service_1000 should be raised before we can start external commands.
+
     FOR    ${i}    IN RANGE    ${1000}
         Ctn Process Service Check Result    host_1    service_${i+1}    1    warning${i}
     END
@@ -55,6 +56,7 @@ EBBPS1
         IF    "${output}" == "((0,),)"    BREAK
     END
     Should Be Equal As Strings    ${output}    ((0,),)
+    Disconnect From Database
 
     FOR    ${i}    IN RANGE    ${1000}
         Ctn Process Service Check Result    host_1    service_${i+1}    2    warning${i}
@@ -92,6 +94,7 @@ EBBPS1
         IF    "${output}" == "((0,),)"    BREAK
     END
     Should Be Equal As Strings    ${output}    ((0,),)
+    Disconnect From Database
 
 EBBPS2
     [Documentation]    1000 service check results are sent to the poller. The test is done with the unified_sql stream, no service status is lost, we find the 1000 results in the database: table services.
@@ -112,7 +115,7 @@ EBBPS2
     ${start}    Get Current Date
     ${start_broker}    Get Current Date
     Ctn Start Broker
-    Ctn Start engine
+    Ctn Start Engine
     ${content}    Create List    INITIAL SERVICE STATE: host_1;service_1000;
     ${result}    Ctn Find In Log With Timeout    ${engineLog0}    ${start}    ${content}    30
     Should Be True
@@ -138,6 +141,7 @@ EBBPS2
         IF    "${output}" == "((0,),)"    BREAK
     END
     Should Be Equal As Strings    ${output}    ((0,),)
+    Disconnect From Database
 
     FOR    ${i}    IN RANGE    ${1000}
         Ctn Process Service Check Result    host_1    service_${i+1}    2    critical${i}
@@ -174,6 +178,7 @@ EBBPS2
         IF    "${output}" == "((0,),)"    BREAK
     END
     Should Be Equal As Strings    ${output}    ((0,),)
+    Disconnect From Database
 
 EBMSSM
     [Documentation]    1000 services are configured with 100 metrics each. The rrd output is removed from the broker configuration. GetSqlManagerStats is called to measure writes into data_bin.
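Reviewer note on the Robot hunks above and below: each polling phase now ends with `Disconnect From Database`, closing the connection that the `Connect To Database`/`Query` loops kept open. The loop pattern itself, query once per second until the expected value shows up or a timeout expires, looks like this as a standalone pymysql helper (hypothetical, for illustration; connection arguments are assumed):

# Illustrative sketch only, not part of the patch.
import time

import pymysql


def wait_for_first_value(query: str, expected, timeout: int = 60, **conn_args):
    """Return True as soon as the first column of the first row of `query`
    equals `expected`, False once `timeout` seconds have elapsed."""
    connection = pymysql.connect(**conn_args)
    try:
        deadline = time.time() + timeout
        while time.time() < deadline:
            with connection.cursor() as cursor:
                cursor.execute(query)
                row = cursor.fetchone()
            if row is not None and row[0] == expected:
                return True
            time.sleep(1)
        return False
    finally:
        # mirrors the Disconnect From Database added to each test
        connection.close()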
@@ -194,7 +199,7 @@ EBMSSM
     Ctn Clear Retention
     ${start}    Get Current Date
     Ctn Start Broker
-    Ctn Start engine
+    Ctn Start Engine
     Ctn Broker Set Sql Manager Stats    51001    5    5
 
     # Let's wait for the external command check start
@@ -220,6 +225,7 @@ EBMSSM
         Sleep    1s
     END
     Should Be True    ${output[0][0]} >= 100000
+    Disconnect From Database
 
 EBPS2
     [Documentation]    1000 services are configured with 20 metrics each. The rrd output is removed from the broker configuration to avoid to write too many rrd files. While metrics are written in bulk, the database is stopped. This must not crash broker.
@@ -243,7 +249,7 @@ EBPS2
     ${start}    Get Current Date
     Ctn Start Broker
-    Ctn Start engine
+    Ctn Start Engine
 
     # Let's wait for the external command check start
     ${content}    Create List    check_for_external_commands()
     ${result}    Ctn Find In Log With Timeout    ${engineLog0}    ${start}    ${content}    60
@@ -297,7 +303,7 @@ RLCode
     ${start}    Get Current Date
     Ctn Start Broker
-    Ctn Start engine
+    Ctn Start Engine
     ${content}    Create List    check_for_external_commands()
     ${result}    Ctn Find In Log With Timeout    ${engineLog0}    ${start}    ${content}    60
@@ -367,7 +373,7 @@ metric_mapping
     ${start}    Get Current Date
     Ctn Start Broker
-    Ctn Start engine
+    Ctn Start Engine
     ${content}    Create List    check_for_external_commands()
     ${result}    Ctn Find In Log With Timeout    ${engineLog0}    ${start}    ${content}    60
@@ -407,7 +413,7 @@ Services_and_bulks_${id}
     ${start}    Get Current Date
     Ctn Start Broker
-    Ctn Start engine
+    Ctn Start Engine
     Ctn Broker Set Sql Manager Stats    51001    5    5
 
     # Let's wait for the external command check start
@@ -438,6 +444,145 @@ Services_and_bulks_${id}
     ...    1    1020
     ...    2    150
 
+EBMSSMDBD
+    [Documentation]    1000 services are configured with 100 metrics each.
+    ...    The rrd output is removed from the broker configuration.
+    ...    While metrics are written in the database, we stop the database and then restart it.
+    ...    Broker must recover its connection to the database and continue to write metrics.
+    [Tags]    broker    engine    unified_sql    MON-153321
+    Ctn Clear Metrics
+    Ctn Config Engine    ${1}    ${1}    ${1000}
+    # We want all the services to be passive to avoid parasitic checks during our test.
+    Ctn Set Services Passive    ${0}    service_.*
+    Ctn Config Broker    central
+    Ctn Config Broker    rrd
+    Ctn Config Broker    module    ${1}
+    Ctn Config BBDO3    1
+    Ctn Broker Config Log    central    core    error
+    Ctn Broker Config Log    central    tcp    error
+    Ctn Broker Config Log    central    sql    debug
+    Ctn Config Broker Sql Output    central    unified_sql
+    Ctn Config Broker Remove Rrd Output    central
+    Ctn Clear Retention
+    ${start}    Get Current Date
+    Ctn Start Broker
+    Ctn Start Engine
+
+    ${content}    Create List    check_for_external_commands()
+    ${result}    Ctn Find In Log With Timeout    ${engineLog0}    ${start}    ${content}    60
+    Should Be True    ${result}    A message about check_for_external_commands() should be available.
+
+    ${start}    Ctn Get Round Current Date
+    # Let's wait for one "INSERT INTO data_bin" to appear in stats.
+    Log To Console    Many service checks with 100 metrics each are processed.
+    FOR    ${i}    IN RANGE    ${1000}
+        Ctn Process Service Check Result With Metrics    host_1    service_${i+1}    1    warning${i}    100
+    END
+
+    Log To Console    We wait for at least one metric to be written in the database.
+    # Let's wait for all force checks to be in the storage database.
+    Connect To Database    pymysql    ${DBName}    ${DBUser}    ${DBPass}    ${DBHost}    ${DBPort}
+    FOR    ${i}    IN RANGE    ${500}
+        ${output}    Query
+        ...    SELECT COUNT(s.last_check) FROM metrics m LEFT JOIN index_data i ON m.index_id = i.id LEFT JOIN services s ON s.host_id = i.host_id AND s.service_id = i.service_id WHERE metric_name LIKE "metric_%%" AND s.last_check >= ${start}
+        IF    ${output[0][0]} >= 1    BREAK
+        Sleep    1s
+    END
+    Disconnect From Database
+
+    Log To Console    Let's start some database manipulation...
+    ${start}    Get Current Date
+
+    FOR    ${i}    IN RANGE    ${3}
+        Ctn Stop Mysql
+        Sleep    10s
+        Ctn Start Mysql
+        ${content}    Create List    could not insert data in data_bin
+        ${result}    Ctn Find In Log With Timeout    ${centralLog}    ${start}    ${content}    10
+        Log To Console    ${result}
+    END
+
+EBMSSMPART
+    [Documentation]    1000 services are configured with 100 metrics each.
+    ...    The rrd output is removed from the broker configuration.
+    ...    The data_bin table is configured with two partitions p1 and p2 such
+    ...    that p1 contains old data and p2 contains current data.
+    ...    While metrics are written in the database, we remove the p2 partition.
+    ...    Once the p2 partition is recreated, broker must recover its connection
+    ...    to the database and continue to write metrics.
+    ...    To check that last point, we force one final service check and verify
+    ...    that its metrics are written in the database.
+    [Tags]    broker    engine    unified_sql    MON-153321
+    Ctn Clear Metrics
+    Ctn Config Engine    ${1}    ${1}    ${1000}
+    # We want all the services to be passive to avoid parasitic checks during our test.
+    Ctn Set Services Passive    ${0}    service_.*
+    Ctn Config Broker    central
+    Ctn Config Broker    rrd
+    Ctn Config Broker    module    ${1}
+    Ctn Config BBDO3    1
+    Ctn Broker Config Log    central    core    error
+    Ctn Broker Config Log    central    tcp    error
+    Ctn Broker Config Log    central    sql    trace
+    Ctn Config Broker Sql Output    central    unified_sql
+    Ctn Config Broker Remove Rrd Output    central
+    Ctn Clear Retention
+
+    Ctn Prepare Partitions For Data Bin
+    ${start}    Get Current Date
+    Ctn Start Broker
+    Ctn Start Engine
+
+    Ctn Wait For Engine To Be Ready    ${start}    1
+
+    ${start}    Ctn Get Round Current Date
+    # Let's wait for one "INSERT INTO data_bin" to appear in stats.
+    Log To Console    Many service checks with 100 metrics each are processed.
+    FOR    ${i}    IN RANGE    ${1000}
+        Ctn Process Service Check Result With Metrics    host_1    service_${i+1}    1    warning${i}    100
+    END
+
+    Log To Console    We wait for at least one metric to be written in the database.
+    # Let's wait for all force checks to be in the storage database.
+    Connect To Database    pymysql    ${DBName}    ${DBUser}    ${DBPass}    ${DBHost}    ${DBPort}
+    FOR    ${i}    IN RANGE    ${500}
+        ${output}    Query
+        ...    SELECT COUNT(s.last_check) FROM metrics m LEFT JOIN index_data i ON m.index_id = i.id LEFT JOIN services s ON s.host_id = i.host_id AND s.service_id = i.service_id WHERE metric_name LIKE "metric_%%" AND s.last_check >= ${start}
+        IF    ${output[0][0]} >= 1    BREAK
+        Sleep    1s
+    END
+    Disconnect From Database
+
+    Log To Console    Let's start some database manipulation...
+    Ctn Remove P2 From Data Bin
+    ${start}    Get Current Date
+
+    ${content}    Create List    errno=
+    FOR    ${i}    IN RANGE    ${6}
+        ${result}    Ctn Find In Log With Timeout    ${centralLog}    ${start}    ${content}    10
+        IF    ${result}    BREAK
+    END
+
+    Log To Console    Let's recreate the p2 partition...
+    Ctn Add P2 To Data Bin
+
+    ${start}    Ctn Get Round Current Date
+    Ctn Process Service Check Result With Metrics    host_1    service_1    0    Last Output OK    100
+
+    Log To Console    Let's wait for the last service check to be in the database...
+    Connect To Database    pymysql    ${DBName}    ${DBUser}    ${DBPass}    ${DBHost}    ${DBPort}
+    FOR    ${i}    IN RANGE    ${120}
+        ${output}    Query    SELECT count(*) FROM data_bin WHERE ctime >= ${start} - 10
+        Log To Console    ${output}
+        IF    ${output[0][0]} >= 100    BREAK
+        Sleep    1s
+    END
+    Log To Console    ${output}
+    Should Be True    ${output[0][0]} >= 100
+    Disconnect From Database
+
+    Ctn Init Data Bin Without Partition
+
 *** Keywords ***
 Ctn Test Clean
diff --git a/tests/broker-engine/services-increased.robot b/tests/broker-engine/services-increased.robot
index 895c6187de0..249e1239573 100644
--- a/tests/broker-engine/services-increased.robot
+++ b/tests/broker-engine/services-increased.robot
@@ -42,7 +42,7 @@ EBNSVC1
         ${result}    Ctn Check Number Of Resources Monitored By Poller Is    ${3}    ${nb_res}    30
         Should Be True    ${result}    Poller 3 should monitor ${nb_srv} services and 16 hosts.
     END
-    Ctn Stop engine
+    Ctn Stop Engine
     Ctn Kindly Stop Broker
 
 Service_increased_huge_check_interval
@@ -144,4 +144,4 @@ Service_increased_huge_check_interval
         ...    rra[0].pdp_per_row must be equal to 5400 for metric ${m}
     END
 
-    [Teardown]    Run Keywords    Ctn Stop engine    AND    Ctn Kindly Stop Broker
+    [Teardown]    Run Keywords    Ctn Stop Engine    AND    Ctn Kindly Stop Broker
diff --git a/tests/resources/Broker.py b/tests/resources/Broker.py
index 3f4b0068c89..68669c9faa7 100755
--- a/tests/resources/Broker.py
+++ b/tests/resources/Broker.py
@@ -1689,7 +1689,7 @@ def ctn_get_service_index(host_id: int, service_id: int, timeout: int = 60):
             my_id = [r['id'] for r in result]
             if len(my_id) > 0:
                 logger.console(
-                    f"Index data {id} found for service {host_id}:{service_id}")
+                    f"Index data {id} found for service {host_id}:{service_id}")
                 return my_id[0]
             time.sleep(2)
     logger.console(f"no index data found for service {host_id}:{service_id}")
@@ -2911,3 +2911,100 @@ def ctn_get_broker_log_info(port, log, timeout=TIMEOUT):
     except:
         logger.console("gRPC server not ready")
     return str(res)
+
+
+def ctn_prepare_partitions_for_data_bin():
+    """
+    Create two partitions for the data_bin table.
+    The first one, p1, contains data with ctime lower than now - 60.
+    The second one, p2, contains the remaining data with ctime lower than now + 3600.
+    """
+    connection = pymysql.connect(host=DB_HOST,
+                                 user=DB_USER,
+                                 password=DB_PASS,
+                                 database=DB_NAME_STORAGE,
+                                 charset='utf8mb4',
+                                 cursorclass=pymysql.cursors.DictCursor)
+
+    now = int(time.time())
+    before = now - 60
+    after = now + 3600
+    with connection:
+        with connection.cursor() as cursor:
+            cursor.execute("DROP TABLE IF EXISTS data_bin")
+            sql = f"""CREATE TABLE `data_bin` (
+  `id_metric` int(11) DEFAULT NULL,
+  `ctime` int(11) DEFAULT NULL,
+  `value` float DEFAULT NULL,
+  `status` enum('0','1','2','3','4') DEFAULT NULL,
+  KEY `index_metric` (`id_metric`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+ PARTITION BY RANGE (`ctime`)
+(PARTITION `p1` VALUES LESS THAN ({before}) ENGINE = InnoDB,
+ PARTITION `p2` VALUES LESS THAN ({after}) ENGINE = InnoDB)"""
+            cursor.execute(sql)
+        connection.commit()
+
+
+def ctn_remove_p2_from_data_bin():
+    """
+    Remove the partition p2 from the data_bin table.
+    """
+    connection = pymysql.connect(host=DB_HOST,
+                                 user=DB_USER,
+                                 password=DB_PASS,
+                                 database=DB_NAME_STORAGE,
+                                 charset='utf8mb4',
+                                 cursorclass=pymysql.cursors.DictCursor)
+
+    with connection:
+        with connection.cursor() as cursor:
+            cursor.execute("ALTER TABLE data_bin DROP PARTITION p2")
+        connection.commit()
+
+
+def ctn_add_p2_to_data_bin():
+    """
+    Add the partition p2 to the data_bin table.
+ """ + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, + charset='utf8mb4', + cursorclass=pymysql.cursors.DictCursor) + + after = int(time.time()) + 3600 + with connection: + with connection.cursor() as cursor: + cursor.execute( + f"ALTER TABLE data_bin ADD PARTITION (PARTITION p2 VALUES LESS THAN ({after}))") + connection.commit() + + +def ctn_init_data_bin_without_partition(): + """ + Recreate the data_bin table without partition. + """ + connection = pymysql.connect(host=DB_HOST, + user=DB_USER, + password=DB_PASS, + database=DB_NAME_STORAGE, + charset='utf8mb4', + cursorclass=pymysql.cursors.DictCursor) + + now = int(time.time()) + before = now - 60 + after = now + 3600 + with connection: + with connection.cursor() as cursor: + cursor.execute("DROP TABLE IF EXISTS data_bin") + sql = f"""CREATE TABLE `data_bin` ( + `id_metric` int(11) DEFAULT NULL, + `ctime` int(11) DEFAULT NULL, + `value` float DEFAULT NULL, + `status` enum('0','1','2','3','4') DEFAULT NULL, + KEY `index_metric` (`id_metric`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1""" + cursor.execute(sql) + connection.commit() diff --git a/tests/resources/resources.resource b/tests/resources/resources.resource index 474c70b1d4f..ac2e0ae7d50 100644 --- a/tests/resources/resources.resource +++ b/tests/resources/resources.resource @@ -370,13 +370,14 @@ Ctn Dump Ba On Error Ctn Process Service Result Hard [Arguments] ${host} ${svc} ${state} ${output} - Repeat Keyword - ... 3 times - ... Ctn Process Service Check Result - ... ${host} - ... ${svc} - ... ${state} - ... ${output} + FOR ${idx} IN RANGE 3 + Ctn Process Service Check Result + ... ${host} + ... ${svc} + ... ${state} + ... ${output} + Sleep 1s + END Ctn Wait For Engine To Be Ready [Arguments] ${start} ${nbEngine}=1 @@ -386,7 +387,7 @@ Ctn Wait For Engine To Be Ready ${result} Ctn Find In Log With Timeout ... ${ENGINE_LOG}/config${i}/centengine.log ... ${start} ${content} 60 - ... verbose=False + ... verbose=False Should Be True ... ${result} ... A message telling check_for_external_commands() should be available in config${i}/centengine.log.